source
stringlengths
3
92
c
stringlengths
26
2.25M
singlenode_reduce.h
/****************************************************************************** * ** Copyright (c) 2016, Intel Corporation ** * ** All rights reserved. ** * ** ** * ** Redistribution and use in source and binary forms, with or without ** * ** modification, are permitted provided that the following conditions ** * ** are met: ** * ** 1. Redistributions of source code must retain the above copyright ** * ** notice, this list of conditions and the following disclaimer. ** * ** 2. Redistributions in binary form must reproduce the above copyright ** * ** notice, this list of conditions and the following disclaimer in the ** * ** documentation and/or other materials provided with the distribution. ** * ** 3. Neither the name of the copyright holder nor the names of its ** * ** contributors may be used to endorse or promote products derived ** * ** from this software without specific prior written permission. ** * ** ** * ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** * ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** * ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** * ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** * ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** * ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** * ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** * ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** * ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** * ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** * ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * ******************************************************************************/ /* Michael Anderson (Intel Corp.) 
* * ******************************************************************************/ #ifndef SRC_SINGLENODE_REDUCE_H_ #define SRC_SINGLENODE_REDUCE_H_ template <typename T> void reduce_csr(T* a, int nnz, T* result, bool* res_set, void (*op_fp)(T, T, T*, void*), void* vsp) { if (nnz == 0) return; if (!(*res_set)) { (*result) = a[0]; for (int ii = 1; ii < nnz; ii++) { // std::cout << ii << "\t" << a[ii] << std::endl; T res_tmp = *result; op_fp(res_tmp, a[ii], result, vsp); (*res_set) = true; } } else { for (int ii = 0; ii < nnz; ii++) { T res_tmp = *result; op_fp(res_tmp, a[ii], result, vsp); } } } template <typename T> void reduce_dcsc(T* vals, int nnz, T* result, bool* res_set, void (*op_fp)(T, T, T*, void*), void* vsp) { if (nnz == 0) return; if (!(*res_set)) { (*result) = vals[0]; for (int ii = 1; ii < nnz; ii++) { T res_tmp = *result; op_fp(res_tmp, vals[ii], result, vsp); (*res_set) = true; } } else { for (int ii = 0; ii < nnz; ii++) { T res_tmp = *result; op_fp(res_tmp, vals[ii], result, vsp); } } } template <typename T> void reduce_dcsc(int* row_inds, int* col_ptrs, int* col_indices, T* vals, int num_partitions, int* row_pointers, int* col_starts, int* edge_pointers, T* yvalue, int * ybit_vector, void (*op_fp)(T, T, T*, void*), void* vsp) { // int * new_nnz = new int[num_partitions]; // memset(new_nnz, 0, num_partitions * sizeof(int)); #pragma omp parallel for schedule(dynamic, 1) for (int p = 0; p < num_partitions; p++) { // For each column const int* column_offset = col_indices + col_starts[p]; const int* partitioned_row_offset = row_inds + edge_pointers[p]; const Ta* partitioned_val_offset = vals + edge_pointers[p]; const int* col_ptrs_cur = col_ptrs + col_starts[p]; for (int j = 0; j < (col_starts[p + 1] - col_starts[p]) - 1 ; j++) { int col_index = col_indices[col_starts[p] + j]; { int nz_idx = col_ptrs_cur[j]; for (; nz_idx < col_ptrs_cur[j + 1]; nz_idx++) { int row_ind = partitioned_row_offset[nz_idx]; Ta Aval = partitioned_val_offset[nz_idx]; Ty 
temp_mul_result; if(get_bitvector(row_ind, ybit_vector)) { Ty temp_y_copy = yvalue[row_ind]; op_add(Aval, temp_y_copy, &(yvalue[row_ind])); } else { yvalue[row_ind] = Aval; set_bitvector(row_ind, ybit_vector); } } } } } } template <typename T> void reduce_dense_segment(T* value, int * bitvector, int nnz, T* result, bool* res_set, void (*op_fp)(T, T, T*, void*), void* vsp) { for(int i = 0 ; i < nnz ; i++) { if(get_bitvector(i, bitvector)) { T temp_result = *result; op_fp(temp_result, value[i], result, vsp); } } } template <typename VT, typename T> void mapreduce_dense_segment(VT* value, int * bitvector, int nnz, T* result, bool* res_set, void (*op_map)(VT*, T*, void*), void (*op_fp)(T, T, T*, void*), void* vsp) { for(int i = 0 ; i < nnz ; i++) { if(get_bitvector(i, bitvector)) { T temp_result = *result; T temp_result2; op_map(value + i, &temp_result2, vsp); op_fp(temp_result, temp_result2, result, vsp); } } } template <typename T> void reduce_dense(T* value, bool* bitvector, int m, int n, T* result, bool* res_set, T (*op_fp)(T, T)) { for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { if (bitvector[i + j * m]) { if (!(*res_set)) { (*res_set) = true; (*result) = value[i + j * m]; } else { T tmp = value[i + j * m]; (*result) = op_fp(tmp, (*result)); } } } } } template <typename T> void reduce_dense(T* value, int * bitvector, int m, int n, T* result, int * result_bitvector, void (*op_fp)(T, T, T*, void*), void* vsp) { for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { if(get_bitvector(i + j * m, bitvector)) { if(!get_bitvector(i, result_bitvector)) { set_bitvector(i, result_bitvector); result[i] = value[i + j * m]; } else { T tmp_result = result[i]; op_fp(value[i + j * m], tmp_result, &(result[i]), vsp); } } } } } #endif // SRC_SINGLENODE_REDUCE_H_
server.h
// // Created by liqinbin on 10/13/20. // #ifndef FEDTREE_SERVER_H #define FEDTREE_SERVER_H #include "FedTree/FL/party.h" #include "FedTree/dataset.h" #include "FedTree/Tree/tree_builder.h" //#include "FedTree/Encryption/HE.h" #include "FedTree/DP/noises.h" #include "FedTree/Tree/gbdt.h" #include "omp.h" // Todo: the server structure. class Server : public Party { public: void init(FLParam &param, int n_total_instances, vector<int> &n_instances_per_party); void horizontal_init (FLParam &param, int n_total_instances, vector<int> &n_instances_per_party, DataSet &dataSet); void vertical_init(FLParam &param, int n_total_instances, vector<int> &n_instances_per_party, vector<float_type> y, vector<float_type> label); void propose_split_candidates(); void send_info(string info_type); // void send_info(vector<Party> &parties, AdditivelyHE::PaillierPublicKey serverKey,vector<SplitCandidate>candidates); void sum_histograms(); void hybrid_merge_trees(); void ensemble_merge_trees(); GBDT global_trees; vector<GBDT> local_trees; GBDTParam model_param; int n_total_instances; vector<int> n_instances_per_party; // AdditivelyHE::PaillierPublicKey publicKey; // vector<AdditivelyHE::PaillierPublicKey> pk_vector; Paillier paillier; void send_key(Party &party) { party.paillier = paillier; } void homo_init() { paillier = Paillier(512); } void decrypt_gh(GHPair &gh) { gh.homo_decrypt(paillier); } void decrypt_gh_pairs(SyncArray<GHPair> &encrypted) { auto encrypted_data = encrypted.host_data(); #pragma omp parallel for for (int i = 0; i < encrypted.size(); i++) { encrypted_data[i].homo_decrypt(paillier); } } void encrypt_gh_pairs(SyncArray<GHPair> &raw) { auto raw_data = raw.host_data(); #pragma omp parallel for for (int i = 0; i < raw.size(); i++) { raw_data[i].homo_encrypt(paillier); } } private: // std::unique_ptr<TreeBuilder> fbuilder; DPnoises<double> DP; }; #endif //FEDTREE_SERVER_H
GrB_Matrix_export.c
//------------------------------------------------------------------------------ // GrB_Matrix_export: export a matrix in CSR, CSC, FullC, FullR, or COO format //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Exports the contents of a matrix in one of 3 formats: CSR, CSC, or COO // (triplet format). The exported matrix is not modified. No typecast is // performed; the output array Ax must be of the same type as the input matrix // A. // The required sizes of the Ap, Ai, and Ax arrays are given by // GrB_Matrix_exportSize. // The GraphBLAS C API does not have a GrB* method to query the type of a // GrB_Matrix or the size of a type. SuiteSparse:GraphBLAS provides // GxB_Matrix_type_name to query the type of a matrix (returning a string), // which can be converted into a GrB_Type with GxB_Type_from_name. The size of // a type can be queried with GxB_Type_size. Using these methods, a user // application can ensure that its Ax array has the correct size for any // given GrB_Matrix it wishes to export, regardless of its type. 
#include "GB_transpose.h"

// Frees the temporary copy T (a static header, so only its content).
#define GB_FREE_ALL         \
{                           \
    GB_phbix_free (T) ;     \
}

//------------------------------------------------------------------------------
// GB_export_worker: export a matrix of any type
//------------------------------------------------------------------------------

static GrB_Info GB_export_worker    // export a matrix
(
    GrB_Index *Ap,          // pointers for CSR, CSC, row indices for COO
    GrB_Index *Ai,          // row indices for CSR, CSC, col indices for COO
    void *Ax,               // values (must match the type of A_input)
    GrB_Index *Ap_len,      // number of entries in Ap (not # of bytes)
    GrB_Index *Ai_len,      // number of entries in Ai (not # of bytes)
    GrB_Index *Ax_len,      // number of entries in Ax (not # of bytes)
    GrB_Format format,      // export format
    GrB_Matrix A_input,     // matrix to export
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    GrB_Matrix A = A_input ;
    // T is workspace for a possible reformatted copy of A; a static header
    // avoids a heap allocation for it.
    struct GB_Matrix_opaque T_header ;
    GrB_Matrix T = GB_clear_static_header (&T_header) ;

    switch (format)
    {
        case GrB_CSR_FORMAT :
        case GrB_CSC_FORMAT :
        case GrB_COO_FORMAT :
            GB_RETURN_IF_NULL (Ap) ;
            GB_RETURN_IF_NULL (Ap_len) ;
            GB_RETURN_IF_NULL (Ai) ;
            GB_RETURN_IF_NULL (Ai_len) ;
            // intentional fallthrough: every format also requires Ax/Ax_len
        default:
            GB_RETURN_IF_NULL (Ax) ;
            GB_RETURN_IF_NULL (Ax_len) ;
    }

    // finish any pending work
    GB_MATRIX_WAIT (A) ;

    //--------------------------------------------------------------------------
    // determine current format of A and if a copy is needed
    //--------------------------------------------------------------------------

    int sparsity = GB_sparsity (A) ;
    bool is_csc = A->is_csc ;
    bool make_copy ;
    bool csc_requested ;

    switch (format)
    {

        case GrB_CSR_FORMAT :
            // a copy is needed unless A is already sparse-by-row
            make_copy = !(sparsity == GxB_SPARSE && !is_csc) ;
            csc_requested = false ;
            break ;

        case GrB_CSC_FORMAT :
            // a copy is needed unless A is already sparse-by-column
            make_copy = !(sparsity == GxB_SPARSE && is_csc) ;
            csc_requested = true ;
            break ;

//      case GrB_DENSE_ROW_FORMAT :
//          if (!GB_is_dense (A))
//          {
//              // A must dense or full
//              return (GrB_INVALID_VALUE) ;
//          }
//          make_copy = !(sparsity == GxB_FULL && !is_csc) ;
//          csc_requested = false ;
//          break ;

//      case GrB_DENSE_COL_FORMAT :
//          if (!GB_is_dense (A))
//          {
//              // A must dense or full
//              return (GrB_INVALID_VALUE) ;
//          }
//          make_copy = !(sparsity == GxB_FULL && is_csc) ;
//          csc_requested = true ;
//          break ;

        case GrB_COO_FORMAT :
            // never make a copy to export in tuple format
            make_copy = false ;
            csc_requested = is_csc ;
            break ;

        default :
            // unknown format
            return (GrB_INVALID_VALUE) ;
    }

    //--------------------------------------------------------------------------
    // create a copy if the matrix is not in the requested format
    //--------------------------------------------------------------------------

    if (make_copy)
    {
        if (is_csc != csc_requested)
        {
            // T = A'
            GB_OK (GB_transpose_cast (T, A->type, csc_requested, A, false,
                Context)) ;
        }
        else
        {
            // T = A
            GB_OK (GB_dup_worker (&T, A->iso, A, true, A->type, Context)) ;
        }

        switch (format)
        {
            case GrB_CSR_FORMAT :
            case GrB_CSC_FORMAT :
                GB_OK (GB_convert_any_to_sparse (T, Context)) ;
                break ;

//          case GrB_DENSE_ROW_FORMAT :
//          case GrB_DENSE_COL_FORMAT :
//              GB_convert_any_to_full (T) ;
//              break ;

            default :
                break ;
        }
        // export from the copy instead of the caller's matrix
        A = T ;
    }

    //--------------------------------------------------------------------------
    // export the contents of the matrix
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    GrB_Index nvals = GB_nnz (A) ;
    int64_t plen = A->vdim+1 ;

    switch (format)
    {
        case GrB_CSR_FORMAT :
        case GrB_CSC_FORMAT :

            // caller's Ap/Ai buffers must be large enough
            if (plen > (*Ap_len) || nvals > (*Ai_len))
            {
                GB_FREE_ALL ;
                return (GrB_INSUFFICIENT_SPACE) ;
            }
            GB_memcpy (Ap, A->p, plen * sizeof (GrB_Index), nthreads_max) ;
            GB_memcpy (Ai, A->i, nvals * sizeof (GrB_Index), nthreads_max) ;
            (*Ap_len) = plen ;
            (*Ai_len) = nvals ;

//      case GrB_DENSE_ROW_FORMAT :
//      case GrB_DENSE_COL_FORMAT :

            if (nvals > (*Ax_len))
            {
                GB_FREE_ALL ;
                return (GrB_INSUFFICIENT_SPACE) ;
            }
            (*Ax_len) = nvals ;
            ASSERT (csc_requested == A->is_csc) ;
            if (A->iso)
            {
                // expand the iso A->x into the non-iso array Ax
                ASSERT (nvals > 0) ;
                GB_iso_expand (Ax, nvals, A->x, A->type->size, Context) ;
            }
            else
            {
                GB_memcpy (Ax, A->x, nvals * A->type->size, nthreads_max) ;
            }
            break ;

        default:
        case GrB_COO_FORMAT :

            // COO needs nvals entries in all three output arrays
            if (nvals > (*Ap_len) || nvals > (*Ai_len) || nvals > (*Ax_len))
            {
                GB_FREE_ALL ;
                return (GrB_INSUFFICIENT_SPACE) ;
            }
            GB_OK (GB_extractTuples (Ap, Ai, Ax, &nvals, A->type->code, A,
                Context)) ;
            (*Ap_len) = nvals ;
            (*Ai_len) = nvals ;
            (*Ax_len) = nvals ;
            break ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_ALL ;
    #pragma omp flush
    return (GrB_SUCCESS) ;
}

//------------------------------------------------------------------------------
// GrB_Matrix_export_*: export a matrix of a given type
//------------------------------------------------------------------------------

#undef  GB_FREE_ALL
#define GB_FREE_ALL ;

// Generates one typed GrB*_Matrix_export_* wrapper: it validates A, checks
// that the matrix type matches acode, then delegates to GB_export_worker.
#define GB_EXPORT(prefix,ctype,T,acode)                                       \
GrB_Info GB_EVAL3 (prefix, _Matrix_export_, T) /* export a matrix */          \
(                                                                             \
    GrB_Index *Ap,      /* pointers for CSR, CSC, row indices for COO */      \
    GrB_Index *Ai,      /* row indices for CSR, CSC, col indices for COO */   \
    ctype *Ax,          /* values (must match the type of A) */               \
    GrB_Index *Ap_len,  /* number of entries in Ap (not # of bytes) */        \
    GrB_Index *Ai_len,  /* number of entries in Ai (not # of bytes) */        \
    GrB_Index *Ax_len,  /* number of entries in Ax (not # of bytes) */        \
    GrB_Format format,  /* export format */                                   \
    GrB_Matrix A        /* matrix to export */                                \
)                                                                             \
{                                                                             \
    GB_WHERE1 (GB_STR(prefix) "_Matrix_export_" GB_STR(T)                     \
        " (Ap, Ai, Ax, &Ap_len, &Ai_len, &Ax_len, format, A)") ;              \
    GB_BURBLE_START (GB_STR(prefix) "_Matrix_export_" GB_STR(T)) ;            \
    GB_RETURN_IF_NULL_OR_FAULTY (A) ;                                         \
    if (A->type->code != acode) return (GrB_DOMAIN_MISMATCH) ;                \
    GrB_Info info = GB_export_worker (Ap, Ai, (void *) Ax,                    \
        Ap_len, Ai_len, Ax_len, format, A, Context) ;                         \
    GB_BURBLE_END ;                                                           \
    return (info) ;                                                           \
}

// One exported entry point per built-in type, plus the user-defined type.
GB_EXPORT (GrB, bool      , BOOL  , GB_BOOL_code  )
GB_EXPORT (GrB, int8_t    , INT8  , GB_INT8_code  )
GB_EXPORT (GrB, int16_t   , INT16 , GB_INT16_code )
GB_EXPORT (GrB, int32_t   , INT32 , GB_INT32_code )
GB_EXPORT (GrB, int64_t   , INT64 , GB_INT64_code )
GB_EXPORT (GrB, uint8_t   , UINT8 , GB_UINT8_code )
GB_EXPORT (GrB, uint16_t  , UINT16, GB_UINT16_code)
GB_EXPORT (GrB, uint32_t  , UINT32, GB_UINT32_code)
GB_EXPORT (GrB, uint64_t  , UINT64, GB_UINT64_code)
GB_EXPORT (GrB, float     , FP32  , GB_FP32_code  )
GB_EXPORT (GrB, double    , FP64  , GB_FP64_code  )
GB_EXPORT (GxB, GxB_FC32_t, FC32  , GB_FC32_code  )
GB_EXPORT (GxB, GxB_FC64_t, FC64  , GB_FC64_code  )
GB_EXPORT (GrB, void      , UDT   , GB_UDT_code   )
neighbor.h
#pragma once #ifdef USE_SIMPLEMAP #include "simplemap.hpp" #endif class ExPair{ public: PS::S64 id_in; PS::S64 id_out; PS::S64 id_cluster; PS::S32 * rank_list; static PS::S32 size; static PS::S32 rem; static PS::S32 n_bit; static void initialize() { const PS::S32 n_proc = PS::Comm::getNumberOfProc(); n_bit = 8 * sizeof(PS::S32); size = (PS::S32)std::ceil((PS::F64)n_proc/n_bit); rem = n_bit*size - n_proc; } static PS::S32 getSize() { return size+3; } ExPair(){ //PS::S32 myrank = PS::Comm::getRank(); id_in = id_out = id_cluster = 0; rank_list = new PS::S32[size]; for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = 0; //setFlag(myrank); } ExPair(PS::S64 id_in0, PS::S64 id_out0, PS::S64 id_cluster0){ //PS::S32 myrank = PS::Comm::getRank(); id_in = id_in0; id_out = id_out0; id_cluster = id_cluster0; rank_list = new PS::S32[size]; for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = 0; //setFlag(myrank); } ExPair(const ExPair & ep){ id_in = ep.id_in; id_out = ep.id_out; id_cluster = ep.id_cluster; rank_list = new PS::S32[size]; for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = ep.rank_list[i]; } ExPair &operator=(const ExPair & ep){ if ( this != &ep ){ id_in = ep.id_in; id_out = ep.id_out; id_cluster = ep.id_cluster; for ( PS::S32 i=0; i<size; i++ ) this->rank_list[i] = ep.rank_list[i]; } return *this; } ~ExPair(){ delete [] rank_list; } PS::S64 getId() const { return id_in; } std::pair<PS::S64,PS::S64> getPair() const { return std::make_pair(id_in, id_out); } PS::S64 getIdCluster() const { return id_cluster; } PS::S64 setIdCluster(PS::S64 id_cluster0) { return id_cluster = id_cluster0; } PS::S32 input(PS::S32 * inp){ id_in = inp[1]; id_out = inp[0]; id_cluster = inp[2]; for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = inp[i+3]; return size+3; } PS::S32 output(PS::S32 * outp){ outp[0] = id_in; outp[1] = id_out; outp[2] = id_cluster; for ( PS::S32 i=0; i<size; i++ ) outp[i+3] = rank_list[i]; return size+3; } bool checkFlag(const PS::S32 i) const { PS::S32 n = i / n_bit; PS::S32 ii = 
i - n_bit * n; return rank_list[n] & (1<<ii); } void setFlag(const PS::S32 i) { PS::S32 n = i / n_bit; PS::S32 ii = i - n_bit * n; rank_list[n] |= (1<<ii); } void unsetFlag(const PS::S32 i) { PS::S32 n = i / n_bit; PS::S32 ii = i - n_bit * n; rank_list[n] &= ~(1<<ii); } void resetFlag() { for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = 0; } bool equalFlag(const ExPair & ep) const { bool check = true; for ( PS::S32 i=0; i<size; i++ ) check &= (rank_list[i]==ep.rank_list[i]); return check; } PS::S32 getMinFlag() const { const PS::S32 n_proc = PS::Comm::getNumberOfProc(); for (PS::S32 i=0; i<n_proc; i++) if ( checkFlag(i) ) return i; return n_proc; } void operator &= (const ExPair & ep) { for ( PS::S32 i=0; i<size; i++ ) this->rank_list[i] &= ep.rank_list[i]; } void operator |= (const ExPair & ep) { for ( PS::S32 i=0; i<size; i++ ) this->rank_list[i] |= ep.rank_list[i]; } bool exchange(const ExPair & ep) { bool check = (this->id_cluster != ep.id_cluster); this->id_cluster = std::min(this->id_cluster, ep.id_cluster); for ( PS::S32 i=0; i<size; i++ ) { check |= (this->rank_list[i] != ep.rank_list[i]); this->rank_list[i] |= ep.rank_list[i]; } return check; } void show(){ const PS::S32 n_proc = PS::Comm::getNumberOfProc(); std::cout << PS::Comm::getRank() << "\t" << id_in << "\t" << id_out << "\t" << id_cluster << "\t"; for ( PS::S32 i=0; i<n_proc; i++ ) std::cout << (checkFlag(i)); std::cout << std::endl; } }; PS::S32 ExPair::size; PS::S32 ExPair::rem; PS::S32 ExPair::n_bit; class NeighborList{ public: std::vector<std::vector<PS::S64> > n_list; #ifndef USE_SIMPLEMAP std::map<PS::S64, PS::S32> id_map; #else SimpleMapLib::Map<PS::S64, PS::S32> id_map; #endif std::vector<PS::S32> with_neighbor_list; std::vector<std::pair<PS::S32, PS::S32> > pair_list; std::vector<std::pair<PS::S64,PS::S64> > ex_list; std::vector<std::pair<PS::S32,PS::S32> > ex_adr_list; std::vector<PS::S32> connected_list; std::vector<std::vector<ExPair> > ex_data; std::map<std::pair<PS::S32,PS::S32>, 
std::pair<PS::S32, PS::S32> > ex_data_map; std::vector<std::vector<PS::S32> > recv_list; std::vector<std::vector<PS::S32> > send_list; std::vector<PS::S32> recv_rank_list; std::vector<PS::S32> send_rank_list; std::vector<std::vector<PS::S32> > ex_data_send; std::vector<std::vector<PS::S32> > ex_data_recv; std::vector<PS::S64> & operator[](PS::S32 i){ return n_list[i]; } NeighborList() { const PS::S32 n_proc = PS::Comm::getNumberOfProc(); n_list.clear(); id_map.clear(); with_neighbor_list.clear(); pair_list.clear(); ex_list.clear(); ex_adr_list.clear(); connected_list.clear(); ex_data_map.clear(); recv_rank_list.clear(); send_rank_list.clear(); ex_data_send.clear(); ex_data_recv.clear(); ex_data.resize(n_proc); recv_list.resize(n_proc); send_list.resize(n_proc); #pragma omp parallel for for (PS::S32 i=0; i<n_proc; i++){ ex_data[i].clear(); recv_list[i].clear(); send_list[i].clear(); } ExPair::initialize(); } template <class Tpsys> void initializeList(Tpsys & pp) { const PS::S32 n_proc = PS::Comm::getNumberOfProc(); const PS::S32 n_loc = pp.getNumberOfParticleLocal(); n_list.clear(); //id_map.clear(); with_neighbor_list.clear(); pair_list.clear(); ex_list.clear(); ex_adr_list.clear(); connected_list.clear(); ex_data_map.clear(); recv_rank_list.clear(); send_rank_list.clear(); ex_data_send.clear(); ex_data_recv.clear(); #pragma omp parallel for for ( PS::S32 i=0; i<n_proc; i++ ){ ex_data[i].clear(); recv_list[i].clear(); send_list[i].clear(); } n_list.resize(n_loc); #pragma omp parallel for for(PS::S32 i=0; i<n_loc; i++) n_list.at(i).clear(); } ExPair & getExData(std::pair<PS::S32, PS::S32> adr) { return ex_data[adr.first][adr.second]; } PS::S32 getNumberOfParticlesWithNeighbor() const { return with_neighbor_list.size(); } PS::S32 getNumberOfNeighborPairsLocal() const { return pair_list.size(); } PS::S32 getNumberOfRankSend() const { return send_rank_list.size(); } PS::S32 getNumberOfRankRecv() const { return recv_rank_list.size(); } PS::S32 getNumberOfRankConnected() 
const { return connected_list.size(); } PS::S32 getNumberOfPairConnected(const PS::S32 ii) const { return ex_data[connected_list.at(ii)].size(); } template <class Tpsys> void addNeighbor(Tpsys & pp, PS::S32 i, PS::S64 j_id, PS::S32 j_rank, PS::S32 j_id_local=-1) { n_list[i].push_back(j_id); pp[i].neighbor ++; pp[i].id_cluster = std::min(pp[i].id_cluster, j_id); if ( j_rank != pp[i].myrank ) { #pragma omp critical { ex_list.push_back(std::make_pair(pp[i].id, j_id)); ex_adr_list.push_back(std::make_pair(j_rank, ex_data.at(j_rank).size())); ex_data_map[std::make_pair(pp[i].id, j_id)] = std::make_pair(j_rank, ex_data.at(j_rank).size()); ExPair ex_pair(pp[i].id, j_id, pp[i].id_cluster); ex_pair.setFlag(pp[i].myrank); ex_pair.setFlag(j_rank); ex_data.at(j_rank).push_back(ex_pair); } pp[i].inDomain = false; } else { if ( j_id_local < 0 ) j_id_local = id_map.at(j_id); if ( i<j_id_local ) { #pragma omp critical { pair_list.push_back(std::make_pair(i, j_id_local)); } } } } template <class Tpsys> void checkNeighbor(Tpsys & pp) { const PS::S32 n_loc = n_list.size(); bool check = true; PS::S32 nei_tot = 0; for ( PS::S32 i=0; i<n_loc; i++ ) { if ( !pp[i].isDead ) assert ( id_map.at(pp[i].id) == i ); } for ( PS::S32 i=0; i<n_loc; i++ ) { PS::S32 n_ngb = n_list.at(i).size(); //if ( pp[i].neighbor ) // std::cout << pp[i].id << "\t"; nei_tot += n_ngb; for ( PS::S32 jj=0; jj<n_ngb; jj++ ) { PS::S32 j_id = n_list.at(i).at(jj); //if ( pp[i].neighbor ) // std::cout << j_id << " "; auto itr = id_map.find(j_id); if ( itr == id_map.end() ) continue; #ifndef USE_SIMPLEMAP PS::S32 j = itr->second; #else PS::S32 j = id_map.second(itr); #endif PS::S32 n_ngb_j = n_list.at(j).size(); PS::S32 n_p = 0; for ( PS::S32 k=0; k<n_ngb_j; k++ ) { PS::S32 k_id = n_list.at(j).at(k); auto itr1 = id_map.find(k_id); if ( itr1 == id_map.end() ) continue; #ifndef USE_SIMPLEMAP auto ss = itr1->second; #else auto ss = id_map.second(itr1); #endif if ( (ss) == i ) n_p ++ ; } if ( n_p != 1 ) { std::cout << i << "\t" 
<< pp[i].id << "\t" << j << "\t" << j_id << std::endl; std::cout << "Neighbor of " << pp[i].id << ": "; for (PS::S32 k=0; k<n_list.at(i).size(); k++) std::cout << n_list.at(i).at(k) << "\t"; std::cout << std::endl; std::cout << "Neighbor of " << j_id << ": "; for (PS::S32 k=0; k<n_list.at(j).size(); k++) std::cout << n_list.at(j).at(k) << "\t"; std::cout << std::endl; check = check && false; check = check && false; } } //if ( pp[i].neighbor ) // std::cout << std::endl; } PS::S32 nei_tot_glb = PS::Comm::getSum(nei_tot); assert ( nei_tot_glb%2 == 0 ); if ( false ) { PS::Abort(); } } void createConnectedRankList(){ const PS::S32 n_proc = PS::Comm::getNumberOfProc(); connected_list.clear(); for ( PS::S32 i=0; i<n_proc; i++ ) { if ( ex_data[i].size() ) { connected_list.push_back(i); assert( i != PS::Comm::getRank() ); } } } void resizeExDataBuffer() { PS::S32 n_send = connected_list.size(); ex_data_send.resize(n_send); ex_data_recv.resize(n_send); for ( PS::S32 i=0; i<n_send; i++ ) { PS::S32 n_size = ex_data[connected_list.at(i)].size() * ExPair::getSize(); ex_data_send.at(i).resize(n_size); ex_data_recv.at(i).resize(n_size); } } template <class Tpsys> void makeIdMap(Tpsys & pp){ const PS::S32 n_loc = pp.getNumberOfParticleLocal(); id_map.clear(); //assert( (PS::S32)(n_list.size()) == n_loc ); #ifdef USE_SIMPLEMAP id_map.resize(n_loc); #pragma omp parallel for schedule(static) #endif for(PS::S32 i=0; i<n_loc; i++){ //assert( pp[i].neighbor == (PS::S32)(n_list[i].size()) ); if ( !pp[i].isDead ) { #ifndef USE_SIMPLEMAP id_map[pp[i].id] = i; #else id_map.set(pp[i].id, i); #endif }else{ #ifdef USE_SIMPLEMAP id_map.set(-1, i); #endif } } #ifdef USE_SIMPLEMAP id_map.makemap(); #endif } #if 1 template <class Tpsys> void createNeighborCluster(Tpsys & pp){ //const PS::S32 n_loc = pp.getNumberOfParticleLocal(); const PS::S32 n_wngb = with_neighbor_list.size(); const PS::S32 n_pair = pair_list.size(); bool check = true; while( check ){ check = false; #pragma omp parallel for 
reduction (||:check) for(PS::S32 ii=0; ii<n_pair; ii++){ PS::S32 i = pair_list.at(ii).first; PS::S32 j = pair_list.at(ii).second; if ( pp[i].id_cluster != pp[j].id_cluster ) { #pragma omp critical { pp[i].id_cluster = pp[j].id_cluster = std::min(pp[i].id_cluster, pp[j].id_cluster); } check = check || true; } } } if( ex_list.size() != 0 ){ PS::S32 n_out = ex_list.size(); #pragma omp parallel for for(PS::S32 ii=0; ii<n_wngb; ii++){ PS::S32 i = with_neighbor_list.at(ii); for(PS::S32 j=0; j<n_out; j++){ PS::S32 i_out = id_map.at(ex_list.at(j).first); PS::S32 id_cluster_out = pp[i_out].id_cluster; if( pp[i].id_cluster == id_cluster_out ) pp[i].inDomain = false; } } } } #else template <class Tpsys> void createNeighborCluster(Tpsys & pp){ const PS::S32 n_loc = pp.getNumberOfParticleLocal(); PS::S64 j_id_cluster = 0; PS::S64 id_cluster[n_loc]; bool check = true; while( check ){ check = false; #pragma omp parallel for for(PS::S32 i=0; i<n_loc; i++){ PS::S64 j_id = 0; PS::S32 nei = 0; nei = pp[i].neighbor; id_cluster[i] = pp[i].id_cluster; if(nei == 0) continue; for(PS::S32 j=0; j<nei; j++){ auto itr = id_map.find(n_list[i].at(j)); if ( itr == id_map.end() ) continue; #ifndef USE_SIMPLEMAP j_id = itr->second; #else j_id = id_map.second(itr); #endif j_id_cluster = pp[j_id].id_cluster; if( id_cluster[i] > j_id_cluster ) id_cluster[i] = j_id_cluster; } } #pragma omp parallel for reduction (||:check) for(PS::S32 i=0; i<n_loc; i++){ if ( pp[i].id_cluster != id_cluster[i] ) { check = check || true; pp[i].id_cluster = id_cluster[i]; } assert( pp[i].id >= id_cluster[i] ); } } if( ex_list.size() != 0 ){ PS::S32 n_out = ex_list.size(); #pragma omp parallel for for(PS::S32 i=0; i<n_loc; i++){ for(PS::S32 j=0; j<n_out; j++){ PS::S32 i_out = id_map.at(ex_list.at(j).first); PS::S32 id_cluster_out = pp[i_out].id_cluster; if( pp[i].id_cluster == id_cluster_out ) pp[i].inDomain = false; } } } } #endif template <class Tpsys> void inputExData(Tpsys & pp){ const PS::S32 n_out = ex_list.size(); 
#pragma omp parallel for for ( PS::S32 j=0; j<n_out; j++ ){ std::pair<PS::S64,PS::S64> pair = ex_list.at(j); std::pair<PS::S32,PS::S32> ex_adr = ex_adr_list.at(j); assert( getExData(ex_adr).getId() == pair.first ); getExData(ex_adr).setIdCluster(pp[id_map.at(pair.first)].id_cluster); } for ( PS::S32 j=0; j<n_out; j++ ){ //std::pair<PS::S32,PS::S32> pair = ex_list.at(j); std::pair<PS::S32,PS::S32> ex_adr = ex_adr_list.at(j); //assert( getExData(ex_adr).getId() == pair.first ); //getExData(ex_adr).setIdCluster(pp[id_map.at(pair.first)].id_cluster); for ( PS::S32 k=0; k<n_out; k++ ){ if ( k == j ) continue; //std::pair<PS::S32,PS::S32> pair2 = ex_list.at(k); std::pair<PS::S32,PS::S32> ex_adr2 = ex_adr_list.at(k); if ( getExData(ex_adr2).getIdCluster() == getExData(ex_adr).getIdCluster() ) { getExData(ex_adr).exchange(getExData(ex_adr2)); } } } } template <class Tpsys> bool exchangeExData(Tpsys & pp, PS::S32 TAG){ //PS::S32** & ex_data_send, //PS::S32** & ex_data_recv){ //const PS::S32 n_proc = PS::Comm::getNumberOfProc(); const PS::S32 n_send = connected_list.size(); //PS::S32 ** ex_data_send = new PS::S32*[n_send]; //PS::S32 ** ex_data_recv = new PS::S32*[n_send]; //for ( PS::S32 ii=0; ii<n_send; ii++ ) { // PS::S32 i = connected_list.at(ii); // PS::S32 n_size = ex_data[i].size() * ExPair::getSize(); // ex_data_send[ii] = new PS::S32[n_size]; // ex_data_recv[ii] = new PS::S32[n_size]; //} #pragma omp parallel for for ( PS::S32 ii=0; ii<n_send; ii++ ) { PS::S32 i = connected_list.at(ii); PS::S32 n_data = ex_data[i].size(); PS::S32 jj = 0; for ( PS::S32 j=0; j<n_data; j++ ) { jj += ex_data[i][j].output(&ex_data_send[ii][jj]); } } #ifdef PARTICLE_SIMULATOR_MPI_PARALLEL MPI_Request req0[n_send], req1[n_send]; MPI_Status stat0[n_send], stat1[n_send]; for ( PS::S32 ii=0; ii<n_send; ii++ ) { PS::S32 i = connected_list.at(ii); PS::S32 n_size = ex_data[i].size() * ExPair::getSize(); MPI_Isend(&ex_data_send[ii][0], n_size, PS::GetDataType(ex_data_send[ii][0]), i, TAG, 
MPI_COMM_WORLD, &req0[ii]); MPI_Irecv(&ex_data_recv[ii][0], n_size, PS::GetDataType(ex_data_recv[ii][0]), i, TAG, MPI_COMM_WORLD, &req1[ii]); } MPI_Waitall(n_send, req0, stat0); MPI_Waitall(n_send, req1, stat1); #else assert ( n_send == 0 ); #endif bool check = false; #pragma omp parallel for reduction (||:check) for ( PS::S32 ii=0; ii<n_send; ii++ ) { PS::S32 i = connected_list.at(ii); PS::S32 n_data = ex_data[i].size(); PS::S32 jj = 0; for ( PS::S32 j=0; j<n_data; j++ ) { ExPair recv_pair; jj += recv_pair.input(&ex_data_recv[ii][jj]); std::pair<PS::S32,PS::S32> adr = ex_data_map.at(recv_pair.getPair()); assert ( adr.first == i ); assert ( recv_pair.getPair() == getExData(adr).getPair() ); bool check_1 = getExData(adr).exchange(recv_pair); check = check || check_1; //getExData(adr).show(); #pragma omp critical { PS::S32 i_loc = id_map.at(getExData(adr).getId()); pp[i_loc].id_cluster = std::min(pp[i_loc].id_cluster, getExData(adr).getIdCluster()); } } //delete [] ex_data_send[ii]; //delete [] ex_data_recv[ii]; } //delete [] ex_data_send; //delete [] ex_data_recv; //PS::Comm::barrier(); //bool check_glb = PS::Comm::synchronizeConditionalBranchOR(check); return check; } template <class Tpsys> void selectSendRecvParticle(Tpsys & pp){ const PS::S32 myrank = PS::Comm::getRank(); const PS::S32 n_proc = PS::Comm::getNumberOfProc(); const PS::S32 n_ptcl = ex_list.size(); std::vector<PS::S64> ex_cluster; std::vector<std::pair<PS::S32,PS::S32> > ex_cluster_adr; ex_cluster.clear(); ex_cluster_adr.clear(); for ( PS::S32 ii=0; ii<n_ptcl; ii++ ) { //std::pair<PS::S32,PS::S32> pair = ex_list.at(ii); std::pair<PS::S32,PS::S32> adr = ex_adr_list.at(ii); PS::S64 id_cluster = getExData(adr).id_cluster; PS::S32 n_l = ex_cluster.size(); std::pair<PS::S32,PS::S32> adr2 = std::make_pair(-1,-1); for (PS::S32 j=0; j<n_l; j++){ if ( id_cluster == ex_cluster.at(j) ){ adr2 = ex_cluster_adr.at(j); assert( getExData(adr).equalFlag(getExData(adr2)) ); } } if ( adr2 == std::make_pair(-1,-1) ){ 
ex_cluster.push_back(id_cluster); ex_cluster_adr.push_back(adr); PS::S32 min_rank = getExData(adr).getMinFlag(); if ( min_rank == myrank ) { for ( PS::S32 j=0; j<n_proc; j++ ) { if ( getExData(adr).checkFlag(j) ) { if ( j == myrank ) continue; recv_list[j].push_back(id_cluster); assert ( j > myrank ); } } } else { assert ( min_rank < myrank ); send_list[min_rank].push_back(id_cluster); } } } for ( PS::S32 i=0; i<n_proc; i++ ) { if ( recv_list[i].size() ) recv_rank_list.push_back(i); if ( send_list[i].size() ) send_rank_list.push_back(i); } } private: void operator =(const NeighborList& NL){} NeighborList(const NeighborList& NL) {} }; template <class Tp> class ExParticleSystem { public : PS::S32 n_send; PS::S32 n_recv; PS::S32 n_ex_ptcl_send_tot; PS::S32 n_ex_nei_send_tot; PS::S32 n_ex_ptcl_recv_tot; PS::S32 n_ex_nei_recv_tot; std::vector<Tp> ex_ptcl_send; std::vector<PS::S64> ex_nei_send; std::vector<Tp> ex_ptcl_recv; std::vector<PS::S64> ex_nei_recv; std::vector<std::vector<PS::S32> > ex_ptcl_send_list; std::vector<PS::S64*> n_list; std::vector<PS::S32> n_ex_ptcl_send; std::vector<PS::S32> n_ex_nei_send; std::vector<PS::S32> n_ex_ptcl_recv; std::vector<PS::S32> n_ex_nei_recv; std::vector<PS::S32> adr_ex_ptcl_send; std::vector<PS::S32> adr_ex_nei_send; std::vector<PS::S32> adr_ex_ptcl_recv; std::vector<PS::S32> adr_ex_nei_recv; Tp & operator[](PS::S32 i){ return ex_ptcl_recv[i]; } PS::S32 getNumberOfParticleLocal() const { return n_ex_ptcl_recv_tot; } void initialize() { n_send = n_recv = 0; n_ex_ptcl_send_tot = n_ex_ptcl_recv_tot = 0; n_ex_nei_send_tot = n_ex_nei_recv_tot = 0; ex_ptcl_send.clear(); ex_nei_send.clear(); ex_ptcl_recv.clear(); ex_nei_recv.clear(); ex_ptcl_send_list.clear(); n_ex_ptcl_send.clear(); n_ex_nei_send.clear(); n_ex_ptcl_recv.clear(); n_ex_nei_recv.clear(); adr_ex_ptcl_send.clear(); adr_ex_nei_send.clear(); adr_ex_ptcl_recv.clear(); adr_ex_nei_recv.clear(); } void resize(PS::S32 n_send0, PS::S32 n_recv0){ n_send = n_send0; 
n_ex_ptcl_send.resize(n_send); n_ex_nei_send.resize(n_send); adr_ex_ptcl_send.resize(n_send); adr_ex_nei_send.resize(n_send); ex_ptcl_send_list.resize(n_send); #pragma omp parallel for for ( PS::S32 i=0; i<n_send; i++ ) ex_ptcl_send_list[i].clear(); n_recv = n_recv0; n_ex_ptcl_recv.resize(n_recv); n_ex_nei_recv.resize(n_recv); adr_ex_ptcl_recv.resize(n_recv); adr_ex_nei_recv.resize(n_recv); } PS::S32 getNumberOfParticleSend() const { return n_ex_ptcl_send_tot; } PS::S32 getNumberOfParticleRecv() const { return n_ex_ptcl_recv_tot; } PS::S32 getNumberOfNeighborSend() const { return n_ex_nei_send_tot; } PS::S32 getNumberOfNeighborRecv() const { return n_ex_nei_recv_tot; } template <class Tpsys> void inputNumberOfExParticleSend(Tpsys & pp, NeighborList & NList){ const PS::S32 n_loc = pp.getNumberOfParticleLocal(); #pragma omp parallel for for ( PS::S32 ii=0; ii<n_send; ii++ ) n_ex_ptcl_send[ii] = n_ex_nei_send[ii] = 0; if ( n_send ) { #pragma omp parallel for for ( PS::S32 i=0; i<n_loc; i++) { if ( !pp[i].inDomain ) { for ( PS::S32 jj=0; jj<n_send; jj++ ){ PS::S32 j = NList.send_rank_list[jj]; PS::S32 n_data = NList.send_list[j].size(); for ( PS::S32 k=0; k<n_data; k++ ) { if ( NList.send_list[j][k] == pp[i].id_cluster ) { #pragma omp critical { n_ex_ptcl_send[jj] ++; n_ex_nei_send[jj] += pp[i].neighbor; assert ( pp[i].neighbor == (PS::S32)(NList.n_list[i].size()) ); ex_ptcl_send_list[jj].push_back(i); } } } } } } } #pragma omp parallel for for ( PS::S32 ii=0; ii<n_send; ii++ ) assert( ex_ptcl_send_list[ii].size() ); } void sendRecvNumberOfExParticle(NeighborList & NList, PS::S32 TAG = 0){ #ifdef PARTICLE_SIMULATOR_MPI_PARALLEL MPI_Request req0[n_send], req1[n_send]; MPI_Status stat0[n_send], stat1[n_send]; for ( PS::S32 ii=0; ii<n_send; ii++ ) { PS::S32 i = NList.send_rank_list[ii]; MPI_Isend(&n_ex_ptcl_send[ii], 1, PS::GetDataType(n_ex_ptcl_send[0]), i, TAG, MPI_COMM_WORLD, &req0[ii]); MPI_Isend(&n_ex_nei_send[ii], 1, PS::GetDataType(n_ex_nei_send[0]), i, TAG+1, 
MPI_COMM_WORLD, &req1[ii]); } MPI_Request req2[n_recv], req3[n_recv]; MPI_Status stat2[n_recv], stat3[n_recv]; for ( PS::S32 ii=0; ii<n_recv; ii++ ) { PS::S32 i = NList.recv_rank_list[ii]; MPI_Irecv(&n_ex_ptcl_recv[ii], 1, PS::GetDataType(n_ex_ptcl_recv[0]), i, TAG, MPI_COMM_WORLD, &req2[ii]); MPI_Irecv(&n_ex_nei_recv[ii], 1, PS::GetDataType(n_ex_nei_recv[0]), i, TAG+1, MPI_COMM_WORLD, &req3[ii]); } MPI_Waitall(n_send, req0, stat0); MPI_Waitall(n_send, req1, stat1); MPI_Waitall(n_recv, req2, stat2); MPI_Waitall(n_recv, req3, stat3); #endif } void inputAdress(){ n_ex_ptcl_send_tot = n_ex_nei_send_tot = 0; for (PS::S32 i=0; i<n_send; i++){ adr_ex_ptcl_send.at(i) = n_ex_ptcl_send_tot; adr_ex_nei_send.at(i) = n_ex_nei_send_tot; n_ex_ptcl_send_tot += n_ex_ptcl_send.at(i); n_ex_nei_send_tot += n_ex_nei_send.at(i); } n_ex_ptcl_recv_tot = n_ex_nei_recv_tot = 0; for (PS::S32 i=0; i<n_recv; i++){ adr_ex_ptcl_recv.at(i) = n_ex_ptcl_recv_tot; adr_ex_nei_recv.at(i) = n_ex_nei_recv_tot; n_ex_ptcl_recv_tot += n_ex_ptcl_recv.at(i); n_ex_nei_recv_tot += n_ex_nei_recv.at(i); } ex_ptcl_send.resize(n_ex_ptcl_send_tot); ex_nei_send.resize(n_ex_nei_send_tot); ex_ptcl_recv.resize(n_ex_ptcl_recv_tot); ex_nei_recv.resize(n_ex_nei_recv_tot); n_list.resize(n_ex_ptcl_recv_tot); } template <class Tpsys> void inputExParticleSend(Tpsys & pp, NeighborList & NList){ #pragma omp parallel for for ( PS::S32 ii=0; ii<n_send; ii++ ) { PS::S32 n_data = n_ex_ptcl_send.at(ii); PS::S32 adr_ptcl = adr_ex_ptcl_send.at(ii); PS::S32 adr_nei = adr_ex_nei_send.at(ii); PS::S32 n_nei = 0; for ( PS::S32 jj=0; jj<n_data; jj++ ) { PS::S32 j = ex_ptcl_send_list[ii].at(jj); pp[j].isSent = true; ex_ptcl_send.at(adr_ptcl + jj) = pp[j]; assert( !pp[j].inDomain ); for ( PS::S32 k=0; k<pp[j].neighbor; k++ ) { ex_nei_send.at(adr_nei + n_nei) = NList.n_list[j].at(k); n_nei ++; } } assert ( n_ex_nei_send.at(ii) == n_nei ); } } void sendRecvExParticle(NeighborList & NList, PS::S32 TAG = 0){ #ifdef 
PARTICLE_SIMULATOR_MPI_PARALLEL MPI_Request req0[n_send], req1[n_send]; MPI_Status stat0[n_send], stat1[n_send]; for ( PS::S32 ii=0; ii<n_send; ii++ ) { PS::S32 i = NList.send_rank_list[ii]; MPI_Isend(&ex_ptcl_send[adr_ex_ptcl_send[ii]], n_ex_ptcl_send[ii], PS::GetDataType(ex_ptcl_send[0]), i, TAG+2, MPI_COMM_WORLD, &req0[ii]); MPI_Isend(&ex_nei_send[adr_ex_nei_send[ii]], n_ex_nei_send[ii], PS::GetDataType(ex_nei_send[0]), i, TAG+3, MPI_COMM_WORLD, &req1[ii]); } MPI_Request req2[n_recv], req3[n_recv]; MPI_Status stat2[n_recv], stat3[n_recv]; for ( PS::S32 ii=0; ii<n_recv; ii++ ) { PS::S32 i = NList.recv_rank_list[ii]; MPI_Irecv(&ex_ptcl_recv[adr_ex_ptcl_recv[ii]], n_ex_ptcl_recv[ii], PS::GetDataType(ex_ptcl_recv[0]), i, TAG+2, MPI_COMM_WORLD, &req2[ii]); MPI_Irecv(&ex_nei_recv[adr_ex_nei_recv[ii]], n_ex_nei_recv[ii], PS::GetDataType(ex_nei_recv[0]), i, TAG+3, MPI_COMM_WORLD, &req3[ii]); } MPI_Waitall(n_send, req0, stat0); MPI_Waitall(n_send, req1, stat1); MPI_Waitall(n_recv, req2, stat2); MPI_Waitall(n_recv, req3, stat3); #endif } void inputNeighborListOfExParticleRecv() { #pragma omp parallel for for ( PS::S32 ii=0; ii<n_recv; ii++ ) { PS::S32 n_data = n_ex_ptcl_recv.at(ii); PS::S32 adr_ptcl = adr_ex_ptcl_recv.at(ii); PS::S32 n_nei = adr_ex_nei_recv.at(ii); for ( PS::S32 jj=0; jj<n_data; jj++ ) { n_list.at(adr_ptcl + jj) = &(ex_nei_recv.at(n_nei)); n_nei += ex_ptcl_recv.at(adr_ptcl + jj).neighbor; assert ( ex_ptcl_recv.at(adr_ptcl + jj).isSent ); } if ( ii+1<n_recv ) assert ( adr_ex_nei_recv.at(ii+1) == n_nei ); } } void returnExParticle(NeighborList & NList, PS::S32 TAG = 0){ #ifdef PARTICLE_SIMULATOR_MPI_PARALLEL MPI_Request req0[n_send], req1[n_send]; MPI_Status stat0[n_send], stat1[n_send]; for ( PS::S32 ii=0; ii<n_send; ii++ ) { PS::S32 i = NList.send_rank_list[ii]; MPI_Irecv(&ex_ptcl_send[adr_ex_ptcl_send[ii]], n_ex_ptcl_send[ii], PS::GetDataType(ex_ptcl_send[0]), i, TAG+4, MPI_COMM_WORLD, &req0[ii]); MPI_Irecv(&ex_nei_send[adr_ex_nei_send[ii]], 
// --- continuation of returnExParticle(): remaining arguments of the
//     MPI_Irecv (begun on the previous line) that receives returned
//     neighbour-id data back into the send-side buffers ---
                      n_ex_nei_send[ii], PS::GetDataType(ex_nei_send[0]),
                      i, TAG+5, MPI_COMM_WORLD, &req1[ii]);
        }
        MPI_Request req2[n_recv], req3[n_recv];
        MPI_Status stat2[n_recv], stat3[n_recv];
        // Send the (possibly updated) imported particles and their neighbour
        // lists back to the ranks they were originally received from.
        // Tags TAG+4 / TAG+5 pair these sends with the Irecvs above.
        for ( PS::S32 ii=0; ii<n_recv; ii++ ) {
            PS::S32 i = NList.recv_rank_list[ii];
            MPI_Isend(&ex_ptcl_recv[adr_ex_ptcl_recv[ii]], n_ex_ptcl_recv[ii],
                      PS::GetDataType(ex_ptcl_recv[0]), i, TAG+4,
                      MPI_COMM_WORLD, &req2[ii]);
            MPI_Isend(&ex_nei_recv[adr_ex_nei_recv[ii]], n_ex_nei_recv[ii],
                      PS::GetDataType(ex_nei_recv[0]), i, TAG+5,
                      MPI_COMM_WORLD, &req3[ii]);
        }
        // Block until every transfer in both directions has completed;
        // only then are the send/recv buffers safe to reuse.
        MPI_Waitall(n_send, req0, stat0);
        MPI_Waitall(n_send, req1, stat1);
        MPI_Waitall(n_recv, req2, stat2);
        MPI_Waitall(n_recv, req3, stat3);
#endif
    }

    // Copy particles that were sent away (and have now been returned via
    // returnExParticle) back into the local particle system pp.
    // NOTE(review): asserts that a particle's id is unchanged unless it was
    // flagged dead by the remote integration -- confirm with the integrator.
    template <class Tpsys>
    void outputExParticleSend(Tpsys & pp, NeighborList & NList){
#pragma omp parallel for
        for ( PS::S32 ii=0; ii<n_send; ii++ ) {
            PS::S32 n_data = n_ex_ptcl_send.at(ii);
            PS::S32 adr_ptcl = adr_ex_ptcl_send.at(ii);
            for ( PS::S32 jj=0; jj<n_data; jj++ ) {
                // j: local index of the jj-th particle sent to rank ii
                PS::S32 j = ex_ptcl_send_list[ii].at(jj);
                PS::S32 id_pre = pp[j].id;
                // Overwrite the stale local copy with the returned particle.
                pp[j] = ex_ptcl_send.at(adr_ptcl + jj);
                if (!pp[j].isDead) assert( pp[j].id == id_pre );
            }
        }
    }
};
reduction.c
#include <stdio.h>
#include <omp.h>

/*
 * OpenMP reduction demo: sums the integers 0..9 in parallel across one
 * thread per available processor, printing each thread's running
 * partial sum as it accumulates, then the combined total.
 */
int main(){
    int values[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
    int sum = 0;

    /* Pin the team size to the processor count; disabling dynamic
       adjustment stops the runtime from handing us fewer threads. */
    omp_set_dynamic(0);
    int nprocs = omp_get_num_procs();
    omp_set_num_threads(nprocs);

    printf("Parallel\n------------");

    /* Each thread accumulates into its own private copy of `sum`; the
       copies are combined with + when the loop ends.  The value printed
       inside the loop is therefore a per-thread partial sum, and the
       interleaving of output lines is nondeterministic. */
    #pragma omp parallel for reduction(+:sum)
    for (int k = 0; k < 10; k++) {
        sum += values[k];
        printf("\n%d from thread %d of %d",
               sum, omp_get_thread_num(), omp_get_num_threads());
    }

    printf("\n\nSum = %d\n", sum);
    return 0;
}
task_dep-1.c
/* { dg-do run } */

/* Regression test for OpenMP task `depend` clauses: a task with
   depend(in: x) may not start until the earlier sibling task with
   depend(out: x) has completed, so the read of x below is guaranteed
   to observe the write x = 2.  The test aborts if that ordering is
   ever violated.  */

#include <stdlib.h>

int main() {
  int x = 1;
  #pragma omp parallel
  #pragma omp single
  {
    /* Writer task: declares an output dependence on x.  */
    #pragma omp task shared(x) depend(out: x)
    x = 2;
    /* Reader task: the input dependence on x orders it after the
       writer; a value other than 2 means the runtime broke the
       dependence and the test fails hard.  */
    #pragma omp task shared(x) depend(in: x)
    if (x != 2)
      abort ();
  }
  return 0;
}
GB_binop__bxor_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxor_uint8) // A.*B function (eWiseMult): GB (_AemultB_01__bxor_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__bxor_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__bxor_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_uint8) // A*D function (colscale): GB (_AxD__bxor_uint8) // D*A function (rowscale): GB (_DxB__bxor_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__bxor_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__bxor_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_uint8) // C=scalar+B GB (_bind1st__bxor_uint8) // C=scalar+B' GB (_bind1st_tran__bxor_uint8) // C=A+scalar GB (_bind2nd__bxor_uint8) // C=A'+scalar GB (_bind2nd_tran__bxor_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij) ^ (bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) ^ (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXOR || GxB_NO_UINT8 || GxB_NO_BXOR_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense: disabled stub.  The "#if 0" opened just
// before this chunk is closed by the "#endif" below -- the dense ewise3-accum
// kernel exists only for MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV and RDIV,
// not for BXOR.
//------------------------------------------------------------------------------

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__bxor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the numeric work is done by the shared template, specialized for
    // uint8_t BXOR via the GB_* macros defined at the top of this file
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bxor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,   // task slicing of B, built by the caller
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bxor_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,        // the scalar b, typeless on entry
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above always returns first.
    // Left in place: this file is auto-generated (see header) and the
    // duplicate return is part of the generator's template.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__bxor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    bool A_is_pattern,
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__bxor_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bxor_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bxor_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bxor_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bxor_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bxor_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bxor_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = (x) ^ (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bxor_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij) ^ (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x) ^ (aij) ; \ } GrB_Info GB (_bind1st_tran__bxor_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) ^ (y) ; \ } GrB_Info GB (_bind2nd_tran__bxor_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
ark_brusselator1D_omp.c
/*--------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU *--------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2019, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End *--------------------------------------------------------------- * Example problem: * * The following test simulates a brusselator problem from chemical * kinetics. This is n PDE system with 3 components, Y = [u,v,w], * satisfying the equations, * u_t = du*u_xx + a - (w+1)*u + v*u^2 * v_t = dv*v_xx + w*u - v*u^2 * w_t = dw*w_xx + (b-w)/ep - w*u * for t in [0, 80], x in [0, 1], with initial conditions * u(0,x) = a + 0.1*sin(pi*x) * v(0,x) = b/a + 0.1*sin(pi*x) * w(0,x) = b + 0.1*sin(pi*x), * and with stationary boundary conditions, i.e. * u_t(t,0) = u_t(t,1) = 0, * v_t(t,0) = v_t(t,1) = 0, * w_t(t,0) = w_t(t,1) = 0. * Note: these can also be implemented as Dirichlet boundary * conditions with values identical to the initial conditions. * * The spatial derivatives are computed using second-order * centered differences, with the data distributed over N points * on a uniform spatial grid. * * This program solves the problem with the DIRK method, using a * Newton iteration with the band linear solver, and a * user-supplied Jacobian routine. This example uses the OpenMP * vector kernel, and employs OpenMP threading within the * right-hand side and Jacobian construction functions. * * 100 outputs are printed at equal intervals, and run statistics * are printed at the end. 
*---------------------------------------------------------------*/

/* Header files */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <arkode/arkode_arkstep.h>    /* prototypes for ARKStep fcts., consts */
#include <nvector/nvector_openmp.h>   /* access to OpenMP N_Vector */
#include <sunmatrix/sunmatrix_band.h> /* access to band SUNMatrix */
#include <sunlinsol/sunlinsol_band.h> /* access to band SUNLinearSolver */
#include <sundials/sundials_types.h>  /* def. of type 'realtype' */
#ifdef _OPENMP
#include <omp.h>                      /* OpenMP functions */
#endif

/* printf conversion specifiers matching the configured realtype precision */
#if defined(SUNDIALS_EXTENDED_PRECISION)
#define GSYM "Lg"
#define ESYM "Le"
#define FSYM "Lf"
#else
#define GSYM "g"
#define ESYM "e"
#define FSYM "f"
#endif

/* accessor macros between (x,v) location and 1D NVector array:
   solution component v (0=u, 1=v, 2=w) at spatial node x */
#define IDX(x,v) (3*(x)+v)

/* user data structure, passed to the RHS and Jacobian routines */
typedef struct {
  sunindextype N;  /* number of intervals */
  int nthreads;    /* number of OpenMP threads */
  realtype dx;     /* mesh spacing */
  realtype a;      /* constant forcing on u */
  realtype b;      /* steady-state value of w */
  realtype du;     /* diffusion coeff for u */
  realtype dv;     /* diffusion coeff for v */
  realtype dw;     /* diffusion coeff for w */
  realtype ep;     /* stiffness parameter */
} *UserData;

/* User-supplied Functions Called by the Solver */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data);
static int Jac(realtype t, N_Vector y, N_Vector fy, SUNMatrix J, void *user_data,
               N_Vector tmp1, N_Vector tmp2, N_Vector tmp3);

/* Private helper functions */
static int LaplaceMatrix(realtype c, SUNMatrix Jac, UserData udata);
static int ReactionJac(realtype c, N_Vector y, SUNMatrix Jac, UserData udata);

/* Private function to check function return values */
static int check_flag(void *flagvalue, const char *funcname, int opt);

/* Main Program: sets up the problem, attaches the band linear solver,
   integrates to Tf while writing solution snapshots to disk, and prints
   final solver statistics.  Optional argv[1] overrides the thread count. */
int main(int argc, char *argv[])
{
  /* general problem parameters */
  realtype T0 = RCONST(0.0);    /* initial time */
  realtype Tf = RCONST(10.0);   /* final time */
  int Nt = 100;                 /* total number of output times */
  int Nvar = 3;                 /* number of solution fields */
  UserData udata = NULL;
  realtype *data;
  sunindextype N = 201;         /* spatial mesh size */
  realtype a = 0.6;             /* problem parameters */
  realtype b = 2.0;
  realtype du = 0.025;
  realtype dv = 0.025;
  realtype dw = 0.025;
  realtype ep = 1.0e-5;         /* stiffness parameter */
  realtype reltol = 1.0e-6;     /* tolerances */
  realtype abstol = 1.0e-10;
  sunindextype NEQ, i;

  /* general problem variables */
  int flag;                     /* reusable error-checking flag */
  N_Vector y = NULL;            /* empty vector for storing solution */
  N_Vector umask = NULL;        /* empty mask vectors for viewing solution components */
  N_Vector vmask = NULL;
  N_Vector wmask = NULL;
  SUNMatrix A = NULL;           /* empty matrix for linear solver */
  SUNLinearSolver LS = NULL;    /* empty linear solver structure */
  void *arkode_mem = NULL;      /* empty ARKode memory structure */
  realtype pi, t, dTout, tout, u, v, w;
  FILE *FID, *UFID, *VFID, *WFID;
  int iout, num_threads;
  long int nst, nst_a, nfe, nfi, nsetups, nje, nfeLS, nni, ncfn, netf;

  /* allocate udata structure */
  udata = (UserData) malloc(sizeof(*udata));
  if (check_flag((void *) udata, "malloc", 2)) return 1;

  /* set the number of threads to use */
  num_threads = 1;                      /* default value */
#ifdef _OPENMP
  num_threads = omp_get_max_threads();  /* overwrite with OMP_NUM_THREADS environment variable */
#endif
  if (argc > 1)                         /* overwrite with command line value, if supplied */
    num_threads = (int) strtol(argv[1], NULL, 0);

  /* store the inputs in the UserData structure */
  udata->N = N;
  udata->a = a;
  udata->b = b;
  udata->du = du;
  udata->dv = dv;
  udata->dw = dw;
  udata->ep = ep;
  udata->nthreads = num_threads;

  /* set total allocated vector length: Nvar fields per spatial node */
  NEQ = Nvar*udata->N;

  /* Initial problem output */
  printf("\n1D Brusselator PDE test problem:\n");
  printf(" N = %li, NEQ = %li\n", (long int) udata->N, (long int) NEQ);
  printf(" num_threads = %i\n", num_threads);
  printf(" problem parameters: a = %"GSYM", b = %"GSYM", ep = %"GSYM"\n",
         udata->a, udata->b, udata->ep);
  printf(" diffusion coefficients: du = %"GSYM", dv = %"GSYM", dw = %"GSYM"\n",
         udata->du, udata->dv, udata->dw);
  printf(" reltol = %.1"ESYM", abstol = %.1"ESYM"\n\n", reltol, abstol);

  /* Initialize vector data structures */
  y = N_VNew_OpenMP(NEQ, num_threads);        /* Create vector for solution */
  if (check_flag((void *)y, "N_VNew_OpenMP", 0)) return 1;
  udata->dx = RCONST(1.0)/(N-1);              /* set spatial mesh spacing */
  data = N_VGetArrayPointer(y);               /* Access data array for new NVector y */
  if (check_flag((void *)data, "N_VGetArrayPointer", 0)) return 1;
  umask = N_VNew_OpenMP(NEQ, num_threads);    /* Create vector masks */
  if (check_flag((void *)umask, "N_VNew_OpenMP", 0)) return 1;
  vmask = N_VNew_OpenMP(NEQ, num_threads);
  if (check_flag((void *)vmask, "N_VNew_OpenMP", 0)) return 1;
  wmask = N_VNew_OpenMP(NEQ, num_threads);
  if (check_flag((void *)wmask, "N_VNew_OpenMP", 0)) return 1;

  /* Set initial conditions into y: each field is its steady value plus a
     small sinusoidal perturbation over the unit interval */
  pi = RCONST(4.0)*atan(RCONST(1.0));
  for (i=0; i<N; i++) {
    data[IDX(i,0)] = a + RCONST(0.1)*sin(pi*i*udata->dx);   /* u */
    data[IDX(i,1)] = b/a + RCONST(0.1)*sin(pi*i*udata->dx); /* v */
    data[IDX(i,2)] = b + RCONST(0.1)*sin(pi*i*udata->dx);   /* w */
  }

  /* Set mask array values for each solution component (1.0 at that
     component's entries, 0.0 elsewhere) for component-wise norms below */
  N_VConst(0.0, umask);
  data = N_VGetArrayPointer(umask);
  if (check_flag((void *) data, "N_VGetArrayPointer", 0)) return 1;
  for (i=0; i<N; i++) data[IDX(i,0)] = RCONST(1.0);

  N_VConst(0.0, vmask);
  data = N_VGetArrayPointer(vmask);
  if (check_flag((void *) data, "N_VGetArrayPointer", 0)) return 1;
  for (i=0; i<N; i++) data[IDX(i,1)] = RCONST(1.0);

  N_VConst(0.0, wmask);
  data = N_VGetArrayPointer(wmask);
  if (check_flag((void *) data, "N_VGetArrayPointer", 0)) return 1;
  for (i=0; i<N; i++) data[IDX(i,2)] = RCONST(1.0);

  /* Initialize matrix and linear solver data structures; bandwidth 4 covers
     coupling between the 3 fields at a node and its nearest neighbors */
  A = SUNBandMatrix(NEQ, 4, 4);
  if (check_flag((void *)A, "SUNBandMatrix", 0)) return 1;
  LS = SUNLinSol_Band(y, A);
  if (check_flag((void *)LS, "SUNLinSol_Band", 0)) return 1;

  /* Call ARKStepCreate to initialize the ARK timestepper module and
     specify the right-hand side function in y'=f(t,y), the initial time
     T0, and the initial dependent variable vector y.  Note: since this
     problem is fully implicit, we set f_E to NULL and f_I to f. */
  arkode_mem = ARKStepCreate(NULL, f, T0, y);
  if (check_flag((void *)arkode_mem, "ARKStepCreate", 0)) return 1;

  /* Set routines */
  flag = ARKStepSetUserData(arkode_mem, (void *) udata);   /* Pass udata to user functions */
  if (check_flag(&flag, "ARKStepSetUserData", 1)) return 1;
  flag = ARKStepSStolerances(arkode_mem, reltol, abstol);  /* Specify tolerances */
  if (check_flag(&flag, "ARKStepSStolerances", 1)) return 1;

  /* Linear solver specification */
  flag = ARKStepSetLinearSolver(arkode_mem, LS, A);        /* Attach matrix and linear solver */
  if (check_flag(&flag, "ARKStepSetLinearSolver", 1)) return 1;
  flag = ARKStepSetJacFn(arkode_mem, Jac);                 /* Set the Jacobian routine */
  if (check_flag(&flag, "ARKStepSetJacFn", 1)) return 1;

  /* output spatial mesh to disk */
  FID=fopen("bruss_mesh.txt","w");
  for (i=0; i<N; i++)  fprintf(FID," %.16"ESYM"\n", udata->dx*i);
  fclose(FID);

  /* Open output stream for results, access data arrays */
  UFID=fopen("bruss_u.txt","w");
  VFID=fopen("bruss_v.txt","w");
  WFID=fopen("bruss_w.txt","w");

  /* output initial condition to disk */
  data = N_VGetArrayPointer(y);
  if (check_flag((void *)data, "N_VGetArrayPointer", 0)) return 1;
  for (i=0; i<N; i++)  fprintf(UFID," %.16"ESYM, data[IDX(i,0)]);
  for (i=0; i<N; i++)  fprintf(VFID," %.16"ESYM, data[IDX(i,1)]);
  for (i=0; i<N; i++)  fprintf(WFID," %.16"ESYM, data[IDX(i,2)]);
  fprintf(UFID,"\n");
  fprintf(VFID,"\n");
  fprintf(WFID,"\n");

  /* Main time-stepping loop: calls ARKStepEvolve to perform the integration,
     then prints results.  Stops when the final time has been reached */
  t = T0;
  dTout = (Tf-T0)/Nt;
  tout = T0+dTout;
  printf(" t ||u||_rms ||v||_rms ||w||_rms\n");
  printf(" ----------------------------------------------\n");
  for (iout=0; iout<Nt; iout++) {

    flag = ARKStepEvolve(arkode_mem, tout, y, &t, ARK_NORMAL);  /* call integrator */
    if (check_flag(&flag, "ARKStepEvolve", 1)) break;
    u = N_VWL2Norm(y,umask);        /* access/print solution statistics */
    u = sqrt(u*u/N);                /* weighted L2 norm -> RMS over the mesh */
    v = N_VWL2Norm(y,vmask);
    v = sqrt(v*v/N);
    w = N_VWL2Norm(y,wmask);
    w = sqrt(w*w/N);
    printf(" %10.6"FSYM" %10.6"FSYM" %10.6"FSYM" %10.6"FSYM"\n", t, u, v, w);
    if (flag >= 0) {                /* successful solve: update output time */
      tout += dTout;
      tout = (tout > Tf) ? Tf : tout;
    } else {                        /* unsuccessful solve: break */
      fprintf(stderr,"Solver failure, stopping integration\n");
      break;
    }

    /* output results to disk */
    for (i=0; i<N; i++)  fprintf(UFID," %.16"ESYM, data[IDX(i,0)]);
    for (i=0; i<N; i++)  fprintf(VFID," %.16"ESYM, data[IDX(i,1)]);
    for (i=0; i<N; i++)  fprintf(WFID," %.16"ESYM, data[IDX(i,2)]);
    fprintf(UFID,"\n");
    fprintf(VFID,"\n");
    fprintf(WFID,"\n");
  }
  printf(" ----------------------------------------------\n");
  fclose(UFID);
  fclose(VFID);
  fclose(WFID);

  /* Print some final statistics */
  flag = ARKStepGetNumSteps(arkode_mem, &nst);
  check_flag(&flag, "ARKStepGetNumSteps", 1);
  flag = ARKStepGetNumStepAttempts(arkode_mem, &nst_a);
  check_flag(&flag, "ARKStepGetNumStepAttempts", 1);
  flag = ARKStepGetNumRhsEvals(arkode_mem, &nfe, &nfi);
  check_flag(&flag, "ARKStepGetNumRhsEvals", 1);
  flag = ARKStepGetNumLinSolvSetups(arkode_mem, &nsetups);
  check_flag(&flag, "ARKStepGetNumLinSolvSetups", 1);
  flag = ARKStepGetNumErrTestFails(arkode_mem, &netf);
  check_flag(&flag, "ARKStepGetNumErrTestFails", 1);
  flag = ARKStepGetNumNonlinSolvIters(arkode_mem, &nni);
  check_flag(&flag, "ARKStepGetNumNonlinSolvIters", 1);
  flag = ARKStepGetNumNonlinSolvConvFails(arkode_mem, &ncfn);
  check_flag(&flag, "ARKStepGetNumNonlinSolvConvFails", 1);
  flag = ARKStepGetNumJacEvals(arkode_mem, &nje);
  check_flag(&flag, "ARKStepGetNumJacEvals", 1);
  flag = ARKStepGetNumLinRhsEvals(arkode_mem, &nfeLS);
  check_flag(&flag, "ARKStepGetNumLinRhsEvals", 1);

  printf("\nFinal Solver Statistics:\n");
  printf(" Internal solver steps = %li (attempted = %li)\n", nst, nst_a);
  printf(" Total RHS evals: Fe = %li, Fi = %li\n", nfe, nfi);
  printf(" Total linear solver setups = %li\n", nsetups);
  printf(" Total RHS evals for setting up the linear system = %li\n", nfeLS);
  printf(" Total number of Jacobian evaluations = %li\n", nje);
  printf(" Total number of Newton iterations = %li\n", nni);
  printf(" Total number of nonlinear solver convergence failures = %li\n", ncfn);
  printf(" Total number of error test failures = %li\n\n", netf);

  /* Clean up and return with successful completion */
  free(udata);               /* Free user data */
  ARKStepFree(&arkode_mem);  /* Free integrator memory */
  SUNLinSolFree(LS);         /* Free linear solver */
  SUNMatDestroy(A);          /* Free matrix */
  N_VDestroy(y);             /* Free vectors */
  N_VDestroy(umask);
  N_VDestroy(vmask);
  N_VDestroy(wmask);

  return 0;
}

/*-------------------------------
 * Functions called by the solver
 *-------------------------------*/

/* f routine to compute the ODE RHS function f(t,y).
*/
/* f: fully-implicit RHS for the 1D Brusselator system.
   t         -- current time (unused: the visible equations do not reference t)
   y         -- current solution, layout [u_i, v_i, w_i] via IDX(i,v)
   ydot      -- output vector, receives f(t,y)
   user_data -- UserData with mesh spacing, coefficients and thread count
   Returns 0 on success, 1 if an array pointer could not be obtained.

   Fix vs. original: ydot was zeroed twice (N_VConst before AND after
   fetching the data pointers, with contradictory comments); a single
   zeroing is sufficient since nothing writes to ydot in between. */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data)
{
  UserData udata = (UserData) user_data;   /* access problem data */
  sunindextype N = udata->N;               /* set variable shortcuts */
  realtype a = udata->a;
  realtype b = udata->b;
  realtype ep = udata->ep;
  realtype du = udata->du;
  realtype dv = udata->dv;
  realtype dw = udata->dw;
  realtype dx = udata->dx;
  realtype *Ydata=NULL, *dYdata=NULL;
  realtype uconst, vconst, wconst, u, ul, ur, v, vl, vr, w, wl, wr;
  sunindextype i;

  Ydata = N_VGetArrayPointer(y);           /* access data arrays */
  if (check_flag((void *)Ydata, "N_VGetArrayPointer", 0)) return 1;
  dYdata = N_VGetArrayPointer(ydot);
  if (check_flag((void *)dYdata, "N_VGetArrayPointer", 0)) return 1;
  N_VConst(0.0, ydot);                     /* initialize ydot to zero (once) */

  /* precompute the loop-invariant diffusion weights d/dx^2 */
  uconst = du/dx/dx;
  vconst = dv/dx/dx;
  wconst = dw/dx/dx;

  /* iterate over interior of the domain, computing all equations */
#pragma omp parallel for default(shared) private(i,u,ul,ur,v,vl,vr,w,wl,wr) schedule(static) num_threads(udata->nthreads)
  for (i=1; i<N-1; i++) {

    /* shortcuts to this node's and its neighbors' values */
    u = Ydata[IDX(i,0)];
    ul = Ydata[IDX(i-1,0)];
    ur = Ydata[IDX(i+1,0)];
    v = Ydata[IDX(i,1)];
    vl = Ydata[IDX(i-1,1)];
    vr = Ydata[IDX(i+1,1)];
    w = Ydata[IDX(i,2)];
    wl = Ydata[IDX(i-1,2)];
    wr = Ydata[IDX(i+1,2)];

    /* u_t = du*u_xx + a - (w+1)*u + v*u^2 */
    dYdata[IDX(i,0)] = (ul - RCONST(2.0)*u + ur)*uconst + a - (w+RCONST(1.0))*u + v*u*u;

    /* v_t = dv*v_xx + w*u - v*u^2 */
    dYdata[IDX(i,1)] = (vl - RCONST(2.0)*v + vr)*vconst + w*u - v*u*u;

    /* w_t = dw*w_xx + (b-w)/ep - w*u */
    dYdata[IDX(i,2)] = (wl - RCONST(2.0)*w + wr)*wconst + (b-w)/ep - w*u;
  }

  /* enforce stationary boundaries (time derivative pinned to zero) */
  dYdata[IDX(0,0)] = dYdata[IDX(0,1)] = dYdata[IDX(0,2)] = 0.0;
  dYdata[IDX(N-1,0)] = dYdata[IDX(N-1,1)] = dYdata[IDX(N-1,2)] = 0.0;

  return 0;
}

/* Jacobian routine to compute J(t,y) = df/dy.
*/
/* Jac: total Jacobian J = d(L*y)/dy + dR(y)/dy, assembled by the two
   helpers below; J is zeroed first, then each helper adds its entries. */
static int Jac(realtype t, N_Vector y, N_Vector fy, SUNMatrix J, void *user_data,
               N_Vector tmp1, N_Vector tmp2, N_Vector tmp3)
{
  UserData udata = (UserData) user_data;  /* access problem data */
  SUNMatZero(J);                          /* Initialize Jacobian to zero */

  /* Fill in the Laplace matrix */
  if (LaplaceMatrix(RCONST(1.0), J, udata)) {
    printf("Jacobian calculation error in calling LaplaceMatrix!\n");
    return 1;
  }

  /* Add in the Jacobian of the reaction terms matrix */
  if (ReactionJac(RCONST(1.0), y, J, udata)) {
    printf("Jacobian calculation error in calling ReactionJac!\n");
    return 1;
  }

  return 0;
}

/*-------------------------------
 * Private helper functions
 *-------------------------------*/

/* Routine to compute the stiffness matrix from (L*y), scaled by the factor c.
   We add the result into Jac and do not erase what was already there */
static int LaplaceMatrix(realtype c, SUNMatrix Jac, UserData udata)
{
  sunindextype N = udata->N;           /* set shortcuts */
  realtype dx = udata->dx;
  sunindextype i;
  realtype uconst = c*udata->du/dx/dx; /* scaled diffusion weights d*c/dx^2 */
  realtype vconst = c*udata->dv/dx/dx;
  realtype wconst = c*udata->dw/dx/dx;

  /* iterate over intervals, filling in Jacobian entries; each iteration i
     writes only rows IDX(i,*), so iterations touch disjoint elements and
     the parallel loop has no write conflicts */
#pragma omp parallel for default(shared) private(i) schedule(static) num_threads(udata->nthreads)
  for (i=1; i<N-1; i++) {

    /* Jacobian of (L*y) at this node: 1-D second-difference stencil
       (+1, -2, +1) per component, scaled by that component's weight */
    SM_ELEMENT_B(Jac,IDX(i,0),IDX(i-1,0)) += uconst;
    SM_ELEMENT_B(Jac,IDX(i,1),IDX(i-1,1)) += vconst;
    SM_ELEMENT_B(Jac,IDX(i,2),IDX(i-1,2)) += wconst;
    SM_ELEMENT_B(Jac,IDX(i,0),IDX(i,0)) -= RCONST(2.0)*uconst;
    SM_ELEMENT_B(Jac,IDX(i,1),IDX(i,1)) -= RCONST(2.0)*vconst;
    SM_ELEMENT_B(Jac,IDX(i,2),IDX(i,2)) -= RCONST(2.0)*wconst;
    SM_ELEMENT_B(Jac,IDX(i,0),IDX(i+1,0)) += uconst;
    SM_ELEMENT_B(Jac,IDX(i,1),IDX(i+1,1)) += vconst;
    SM_ELEMENT_B(Jac,IDX(i,2),IDX(i+1,2)) += wconst;
  }

  return 0;
}

/* Routine to compute the Jacobian matrix from R(y), scaled by the factor c.
   We add the result into Jac and do not erase what was already there */
static int ReactionJac(realtype c, N_Vector y, SUNMatrix Jac, UserData udata)
{
  sunindextype N = udata->N;                 /* set shortcuts */
  realtype ep = udata->ep;
  sunindextype i;
  realtype u, v, w;
  realtype *Ydata = N_VGetArrayPointer(y);   /* access solution array */
  if (check_flag((void *)Ydata, "N_VGetArrayPointer", 0)) return 1;

  /* iterate over interior nodes, filling in Jacobian entries; each
     iteration writes only rows IDX(i,*), so the parallel loop is safe */
#pragma omp parallel for default(shared) private(i,u,v,w) schedule(static) num_threads(udata->nthreads)
  for (i=1; i<N-1; i++) {

    /* set nodal value shortcuts (NOTE: i indexes the node directly; the
       original "shifted index" comment was stale -- no shift is applied) */
    u = Ydata[IDX(i,0)];
    v = Ydata[IDX(i,1)];
    w = Ydata[IDX(i,2)];

    /* all vars wrt u:  dRu/du = 2uv - (w+1), dRv/du = w - 2uv, dRw/du = -w */
    SM_ELEMENT_B(Jac,IDX(i,0),IDX(i,0)) += c*(RCONST(2.0)*u*v-(w+RCONST(1.0)));
    SM_ELEMENT_B(Jac,IDX(i,1),IDX(i,0)) += c*(w - RCONST(2.0)*u*v);
    SM_ELEMENT_B(Jac,IDX(i,2),IDX(i,0)) += c*(-w);

    /* all vars wrt v:  dRu/dv = u^2, dRv/dv = -u^2 */
    SM_ELEMENT_B(Jac,IDX(i,0),IDX(i,1)) += c*(u*u);
    SM_ELEMENT_B(Jac,IDX(i,1),IDX(i,1)) += c*(-u*u);

    /* all vars wrt w:  dRu/dw = -u, dRv/dw = u, dRw/dw = -1/ep - u */
    SM_ELEMENT_B(Jac,IDX(i,0),IDX(i,2)) += c*(-u);
    SM_ELEMENT_B(Jac,IDX(i,1),IDX(i,2)) += c*(u);
    SM_ELEMENT_B(Jac,IDX(i,2),IDX(i,2)) += c*(-RCONST(1.0)/ep - u);
  }

  return 0;
}

/* Check function return value...
/* check_flag: uniform error checking for SUNDIALS and allocator calls.
   flagvalue -- pointer under test, or pointer to the returned int flag
   funcname  -- name of the checked function, used in diagnostics
   opt == 0  -- SUNDIALS allocator: fail when flagvalue is NULL
   opt == 1  -- SUNDIALS status flag: fail when *flagvalue < 0
   opt == 2  -- generic allocator (e.g. malloc): fail when flagvalue is NULL
   Any other opt is treated as success.
   Returns 1 on failure (after printing to stderr), 0 otherwise. */
static int check_flag(void *flagvalue, const char *funcname, int opt)
{
  int errvalue;

  switch (opt) {

  case 0:  /* allocation-style SUNDIALS call: NULL signals failure */
    if (flagvalue == NULL) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return 1;
    }
    break;

  case 1:  /* flag-style SUNDIALS call: negative flag signals failure */
    errvalue = *((int *) flagvalue);
    if (errvalue < 0) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
              funcname, errvalue);
      return 1;
    }
    break;

  case 2:  /* plain memory allocation: NULL signals failure */
    if (flagvalue == NULL) {
      fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return 1;
    }
    break;

  default: /* unrecognized opt: nothing to check */
    break;
  }

  return 0;
}

/*---- end of file ----*/
/* ==== concatenated file boundary: convolution_3x3_pack8_fp16s.h ==== */
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_pack8_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // 
interleave // src = 64-inch-outch // dst = 8b-8a-inch/8a-64-outch/8b kernel_tm_pack8.create(inch / 8, 64, outch / 8, (size_t)2u * 64, 64); int q = 0; for (; q + 7 < outch; q += 8) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); const Mat k4 = kernel_tm.channel(q + 4); const Mat k5 = kernel_tm.channel(q + 5); const Mat k6 = kernel_tm.channel(q + 6); const Mat k7 = kernel_tm.channel(q + 7); Mat g0 = kernel_tm_pack8.channel(q / 8); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int p = 0; p + 7 < inch; p += 8) { for (int i = 0; i < 8; i++) { const float* k00 = k0.row(p + i); const float* k10 = k1.row(p + i); const float* k20 = k2.row(p + i); const float* k30 = k3.row(p + i); const float* k40 = k4.row(p + i); const float* k50 = k5.row(p + i); const float* k60 = k6.row(p + i); const float* k70 = k7.row(p + i); g00[0] = (__fp16)k00[k]; g00[1] = (__fp16)k10[k]; g00[2] = (__fp16)k20[k]; g00[3] = (__fp16)k30[k]; g00[4] = (__fp16)k40[k]; g00[5] = (__fp16)k50[k]; g00[6] = (__fp16)k60[k]; g00[7] = (__fp16)k70[k]; g00 += 8; } } } } } static void conv3x3s1_winograd64_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const __fp16* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; // bottom_blob_tm.create(tiles, 
64, inch, elemsize, elempack, opt.workspace_allocator); bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); __fp16 tmp[8][8][8]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 8; for (int m = 0; m < 8; m++) { float16x8_t _r00 = vld1q_f16(r0); float16x8_t _r01 = vld1q_f16(r0 + 8); float16x8_t _r02 = vld1q_f16(r0 + 16); float16x8_t _r03 = vld1q_f16(r0 + 24); float16x8_t _r04 = vld1q_f16(r0 + 32); float16x8_t _r05 = vld1q_f16(r0 + 40); float16x8_t _r06 = vld1q_f16(r0 + 48); float16x8_t _r07 = vld1q_f16(r0 + 56); float16x8_t _tmp0m = vfmaq_n_f16(vsubq_f16(_r00, _r06), vsubq_f16(_r04, _r02), 5.25f); float16x8_t _tmp7m = 
vfmaq_n_f16(vsubq_f16(_r07, _r01), vsubq_f16(_r03, _r05), 5.25f); vst1q_f16(tmp[0][m], _tmp0m); vst1q_f16(tmp[7][m], _tmp7m); // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_r02, _r06), _r04, 4.25f); float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); float16x8_t _tmp1m = vaddq_f16(_tmp12a, _tmp12b); float16x8_t _tmp2m = vsubq_f16(_tmp12a, _tmp12b); vst1q_f16(tmp[1][m], _tmp1m); vst1q_f16(tmp[2][m], _tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = tmp12a - tmp12b; float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_r06, _r02, 0.25f), _r04, 1.25f); float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); float16x8_t _tmp3m = vaddq_f16(_tmp34a, _tmp34b); float16x8_t _tmp4m = vsubq_f16(_tmp34a, _tmp34b); vst1q_f16(tmp[3][m], _tmp3m); vst1q_f16(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // tmp[4][m] = tmp34a - tmp34b; float16x8_t _tmp56a = vfmaq_n_f16(_r06, vfmsq_n_f16(_r02, _r04, 1.25f), 4.f); float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); float16x8_t _tmp5m = vaddq_f16(_tmp56a, _tmp56b); float16x8_t _tmp6m = vsubq_f16(_tmp56a, _tmp56b); vst1q_f16(tmp[5][m], _tmp5m); vst1q_f16(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 8; } __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * 8; __fp16* r0_tm_1 = r0_tm_0 + tiles * 8; __fp16* r0_tm_2 = r0_tm_0 + tiles * 16; __fp16* r0_tm_3 = r0_tm_0 + tiles * 24; __fp16* r0_tm_4 = r0_tm_0 + tiles * 32; __fp16* r0_tm_5 = r0_tm_0 + tiles 
* 40; __fp16* r0_tm_6 = r0_tm_0 + tiles * 48; __fp16* r0_tm_7 = r0_tm_0 + tiles * 56; for (int m = 0; m < 8; m++) { float16x8_t _tmp00 = vld1q_f16(tmp[m][0]); float16x8_t _tmp01 = vld1q_f16(tmp[m][1]); float16x8_t _tmp02 = vld1q_f16(tmp[m][2]); float16x8_t _tmp03 = vld1q_f16(tmp[m][3]); float16x8_t _tmp04 = vld1q_f16(tmp[m][4]); float16x8_t _tmp05 = vld1q_f16(tmp[m][5]); float16x8_t _tmp06 = vld1q_f16(tmp[m][6]); float16x8_t _tmp07 = vld1q_f16(tmp[m][7]); float16x8_t _r0tm0 = vfmaq_n_f16(vsubq_f16(_tmp00, _tmp06), vsubq_f16(_tmp04, _tmp02), 5.25f); float16x8_t _r0tm7 = vfmaq_n_f16(vsubq_f16(_tmp07, _tmp01), vsubq_f16(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_tmp02, _tmp06), _tmp04, 4.25f); float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); float16x8_t _r0tm1 = vaddq_f16(_tmp12a, _tmp12b); float16x8_t _r0tm2 = vsubq_f16(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); float16x8_t _r0tm3 = vaddq_f16(_tmp34a, _tmp34b); float16x8_t _r0tm4 = vsubq_f16(_tmp34a, _tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; float16x8_t _tmp56a = vfmaq_n_f16(_tmp06, vfmsq_n_f16(_tmp02, _tmp04, 1.25f), 4.f); float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); 
float16x8_t _r0tm5 = vaddq_f16(_tmp56a, _tmp56b); float16x8_t _r0tm6 = vsubq_f16(_tmp56a, _tmp56b); // r0_tm[5] = tmp56a + tmp56b; // r0_tm[6] = tmp56a - tmp56b; vst1q_f16(r0_tm_0, _r0tm0); vst1q_f16(r0_tm_1, _r0tm1); vst1q_f16(r0_tm_2, _r0tm2); vst1q_f16(r0_tm_3, _r0tm3); vst1q_f16(r0_tm_4, _r0tm4); vst1q_f16(r0_tm_5, _r0tm5); vst1q_f16(r0_tm_6, _r0tm6); vst1q_f16(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 64; r0_tm_1 += tiles * 64; r0_tm_2 += tiles * 64; r0_tm_3 += tiles * 64; r0_tm_4 += tiles * 64; r0_tm_5 += tiles * 64; r0_tm_6 += tiles * 64; r0_tm_7 += tiles * 64; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { __fp16* tm2p = tm2.row<__fp16>(i / 12); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 12x8 asm volatile( "prfm 
pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0], #64 \n" "ld4 {v16.8h, v17.8h, v18.8h, v19.8h}, [%0] \n" "sub %0, %0, #128 \n" "uzp1 v20.8h, v0.8h, v4.8h \n" // 0 "uzp1 v21.8h, v16.8h, v1.8h \n" // 1 "uzp1 v22.8h, v5.8h, v17.8h \n" // 2 "uzp1 v23.8h, v2.8h, v6.8h \n" // 3 "uzp1 v24.8h, v18.8h, v3.8h \n" // 4 "uzp1 v25.8h, v7.8h, v19.8h \n" // 5 "uzp2 v26.8h, v0.8h, v4.8h \n" // 6 "uzp2 v27.8h, v16.8h, v1.8h \n" // 7 "uzp2 v28.8h, v5.8h, v17.8h \n" // 8 "uzp2 v29.8h, v2.8h, v6.8h \n" // 9 "uzp2 v30.8h, v18.8h, v3.8h \n" // 10 "uzp2 v31.8h, v7.8h, v19.8h \n" // 11 "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); r0 += bottom_blob_tm.cstep * 8; } } for (; i + 7 < tiles; i += 8) { __fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); r0 += bottom_blob_tm.cstep * 8; } } for (; i + 
3 < tiles; i += 4) { __fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); r0 += bottom_blob_tm.cstep * 8; } } for (; i + 1 < tiles; i += 2) { __fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h}, [%0] \n" "st1 {v0.8h, v1.8h}, [%1], #32 \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "v0", "v1"); r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { __fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "v0"); r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const __fp16* r0 = bb2.row<const __fp16>(i / 12); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, 
v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123 "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w0123 "fmla v20.8h, v12.8h, v0.h[0] \n" "fmla v21.8h, v12.8h, v0.h[1] \n" "fmla v22.8h, v12.8h, v0.h[2] \n" "fmla v23.8h, v12.8h, v0.h[3] \n" "fmla v24.8h, v12.8h, v0.h[4] \n" "fmla v25.8h, v12.8h, v0.h[5] \n" "fmla v26.8h, v12.8h, v0.h[6] \n" "fmla v27.8h, v12.8h, v0.h[7] \n" "fmla v28.8h, v12.8h, v1.h[0] \n" "fmla v29.8h, v12.8h, v1.h[1] \n" "fmla v30.8h, v12.8h, v1.h[2] \n" "fmla v31.8h, v12.8h, v1.h[3] \n" "fmla v20.8h, v13.8h, v1.h[4] \n" "fmla v21.8h, v13.8h, v1.h[5] \n" "fmla v22.8h, v13.8h, v1.h[6] \n" "fmla v23.8h, v13.8h, v1.h[7] \n" "fmla v24.8h, v13.8h, v2.h[0] \n" "fmla v25.8h, v13.8h, v2.h[1] \n" "fmla v26.8h, v13.8h, v2.h[2] \n" "fmla v27.8h, v13.8h, v2.h[3] \n" "fmla v28.8h, v13.8h, v2.h[4] \n" "fmla v29.8h, v13.8h, v2.h[5] \n" "fmla v30.8h, v13.8h, v2.h[6] \n" "fmla v31.8h, v13.8h, v2.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567 "fmla v20.8h, v14.8h, v3.h[0] \n" "fmla v21.8h, v14.8h, v3.h[1] \n" "fmla v22.8h, v14.8h, v3.h[2] \n" "fmla v23.8h, v14.8h, v3.h[3] \n" "fmla v24.8h, v14.8h, v3.h[4] \n" "fmla v25.8h, v14.8h, v3.h[5] \n" "fmla v26.8h, v14.8h, v3.h[6] \n" "fmla v27.8h, v14.8h, v3.h[7] \n" "fmla v28.8h, v14.8h, v4.h[0] \n" "fmla v29.8h, v14.8h, v4.h[1] \n" "fmla v30.8h, v14.8h, v4.h[2] \n" "fmla v31.8h, v14.8h, v4.h[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%3], #64 \n" // w4567 "fmla v20.8h, v15.8h, v4.h[4] \n" 
"fmla v21.8h, v15.8h, v4.h[5] \n" "fmla v22.8h, v15.8h, v4.h[6] \n" "fmla v23.8h, v15.8h, v4.h[7] \n" "fmla v24.8h, v15.8h, v5.h[0] \n" "fmla v25.8h, v15.8h, v5.h[1] \n" "fmla v26.8h, v15.8h, v5.h[2] \n" "fmla v27.8h, v15.8h, v5.h[3] \n" "fmla v28.8h, v15.8h, v5.h[4] \n" "fmla v29.8h, v15.8h, v5.h[5] \n" "fmla v30.8h, v15.8h, v5.h[6] \n" "fmla v31.8h, v15.8h, v5.h[7] \n" "fmla v20.8h, v16.8h, v6.h[0] \n" "fmla v21.8h, v16.8h, v6.h[1] \n" "fmla v22.8h, v16.8h, v6.h[2] \n" "fmla v23.8h, v16.8h, v6.h[3] \n" "fmla v24.8h, v16.8h, v6.h[4] \n" "fmla v25.8h, v16.8h, v6.h[5] \n" "fmla v26.8h, v16.8h, v6.h[6] \n" "fmla v27.8h, v16.8h, v6.h[7] \n" "fmla v28.8h, v16.8h, v7.h[0] \n" "fmla v29.8h, v16.8h, v7.h[1] \n" "fmla v30.8h, v16.8h, v7.h[2] \n" "fmla v31.8h, v16.8h, v7.h[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r891011 "fmla v20.8h, v17.8h, v7.h[4] \n" "fmla v21.8h, v17.8h, v7.h[5] \n" "fmla v22.8h, v17.8h, v7.h[6] \n" "fmla v23.8h, v17.8h, v7.h[7] \n" "fmla v24.8h, v17.8h, v8.h[0] \n" "fmla v25.8h, v17.8h, v8.h[1] \n" "fmla v26.8h, v17.8h, v8.h[2] \n" "fmla v27.8h, v17.8h, v8.h[3] \n" "fmla v28.8h, v17.8h, v8.h[4] \n" "fmla v29.8h, v17.8h, v8.h[5] \n" "fmla v30.8h, v17.8h, v8.h[6] \n" "fmla v31.8h, v17.8h, v8.h[7] \n" "fmla v20.8h, v18.8h, v9.h[0] \n" "fmla v21.8h, v18.8h, v9.h[1] \n" "fmla v22.8h, v18.8h, v9.h[2] \n" "fmla v23.8h, v18.8h, v9.h[3] \n" "fmla v24.8h, v18.8h, v9.h[4] \n" "fmla v25.8h, v18.8h, v9.h[5] \n" "fmla v26.8h, v18.8h, v9.h[6] \n" "fmla v27.8h, v18.8h, v9.h[7] \n" "fmla v28.8h, v18.8h, v10.h[0] \n" "fmla v29.8h, v18.8h, v10.h[1] \n" "fmla v30.8h, v18.8h, v10.h[2] \n" "fmla v31.8h, v18.8h, v10.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.8h, v19.8h, v10.h[4] \n" "fmla v21.8h, v19.8h, v10.h[5] \n" "fmla v22.8h, v19.8h, v10.h[6] \n" "fmla v23.8h, v19.8h, v10.h[7] \n" "fmla v24.8h, v19.8h, v11.h[0] \n" "fmla v25.8h, v19.8h, v11.h[1] \n" "fmla v26.8h, v19.8h, v11.h[2] \n" "fmla v27.8h, v19.8h, v11.h[3] \n" 
"fmla v28.8h, v19.8h, v11.h[4] \n" "fmla v29.8h, v19.8h, v11.h[5] \n" "fmla v30.8h, v19.8h, v11.h[6] \n" "fmla v31.8h, v19.8h, v11.h[7] \n" "bne 0b \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v17.8h, v8.8h, v0.h[1] \n" "fmla v18.8h, v8.8h, v0.h[2] \n" "fmla v19.8h, v8.8h, v0.h[3] \n" "fmla v20.8h, v8.8h, v0.h[4] \n" "fmla v21.8h, v8.8h, v0.h[5] \n" "fmla v22.8h, v8.8h, v0.h[6] \n" "fmla v23.8h, v8.8h, v0.h[7] \n" "fmla v16.8h, v9.8h, v1.h[0] \n" "fmla v17.8h, v9.8h, v1.h[1] \n" "fmla v18.8h, v9.8h, v1.h[2] \n" "fmla v19.8h, v9.8h, v1.h[3] \n" "fmla v20.8h, v9.8h, v1.h[4] \n" "fmla v21.8h, v9.8h, v1.h[5] \n" "fmla v22.8h, v9.8h, v1.h[6] \n" "fmla v23.8h, v9.8h, v1.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567 "fmla v16.8h, v10.8h, v2.h[0] \n" "fmla v17.8h, 
v10.8h, v2.h[1] \n" "fmla v18.8h, v10.8h, v2.h[2] \n" "fmla v19.8h, v10.8h, v2.h[3] \n" "fmla v20.8h, v10.8h, v2.h[4] \n" "fmla v21.8h, v10.8h, v2.h[5] \n" "fmla v22.8h, v10.8h, v2.h[6] \n" "fmla v23.8h, v10.8h, v2.h[7] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v11.8h, v3.h[0] \n" "fmla v17.8h, v11.8h, v3.h[1] \n" "fmla v18.8h, v11.8h, v3.h[2] \n" "fmla v19.8h, v11.8h, v3.h[3] \n" "fmla v20.8h, v11.8h, v3.h[4] \n" "fmla v21.8h, v11.8h, v3.h[5] \n" "fmla v22.8h, v11.8h, v3.h[6] \n" "fmla v23.8h, v11.8h, v3.h[7] \n" "fmla v16.8h, v12.8h, v4.h[0] \n" "fmla v17.8h, v12.8h, v4.h[1] \n" "fmla v18.8h, v12.8h, v4.h[2] \n" "fmla v19.8h, v12.8h, v4.h[3] \n" "fmla v20.8h, v12.8h, v4.h[4] \n" "fmla v21.8h, v12.8h, v4.h[5] \n" "fmla v22.8h, v12.8h, v4.h[6] \n" "fmla v23.8h, v12.8h, v4.h[7] \n" "fmla v16.8h, v13.8h, v5.h[0] \n" "fmla v17.8h, v13.8h, v5.h[1] \n" "fmla v18.8h, v13.8h, v5.h[2] \n" "fmla v19.8h, v13.8h, v5.h[3] \n" "fmla v20.8h, v13.8h, v5.h[4] \n" "fmla v21.8h, v13.8h, v5.h[5] \n" "fmla v22.8h, v13.8h, v5.h[6] \n" "fmla v23.8h, v13.8h, v5.h[7] \n" "fmla v16.8h, v14.8h, v6.h[0] \n" "fmla v17.8h, v14.8h, v6.h[1] \n" "fmla v18.8h, v14.8h, v6.h[2] \n" "fmla v19.8h, v14.8h, v6.h[3] \n" "fmla v20.8h, v14.8h, v6.h[4] \n" "fmla v21.8h, v14.8h, v6.h[5] \n" "fmla v22.8h, v14.8h, v6.h[6] \n" "fmla v23.8h, v14.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v15.8h, v7.h[0] \n" "fmla v17.8h, v15.8h, v7.h[1] \n" "fmla v18.8h, v15.8h, v7.h[2] \n" "fmla v19.8h, v15.8h, v7.h[3] \n" "fmla v20.8h, v15.8h, v7.h[4] \n" "fmla v21.8h, v15.8h, v7.h[5] \n" "fmla v22.8h, v15.8h, v7.h[6] \n" "fmla v23.8h, v15.8h, v7.h[7] \n" "bne 0b \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", 
"v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v17.8h, v8.8h, v1.h[0] \n" "fmla v18.8h, v8.8h, v2.h[0] \n" "fmla v19.8h, v8.8h, v3.h[0] \n" "fmla v16.8h, v9.8h, v0.h[1] \n" "fmla v17.8h, v9.8h, v1.h[1] \n" "fmla v18.8h, v9.8h, v2.h[1] \n" "fmla v19.8h, v9.8h, v3.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v10.8h, v0.h[2] \n" "fmla v17.8h, v10.8h, v1.h[2] \n" "fmla v18.8h, v10.8h, v2.h[2] \n" "fmla v19.8h, v10.8h, v3.h[2] \n" "fmla v16.8h, v11.8h, v0.h[3] \n" "fmla v17.8h, v11.8h, v1.h[3] \n" "fmla v18.8h, v11.8h, v2.h[3] \n" "fmla v19.8h, v11.8h, v3.h[3] \n" "fmla v16.8h, v12.8h, v0.h[4] \n" "fmla v17.8h, v12.8h, v1.h[4] \n" "fmla v18.8h, v12.8h, v2.h[4] \n" "fmla v19.8h, v12.8h, v3.h[4] \n" "fmla v16.8h, v13.8h, v0.h[5] \n" "fmla v17.8h, v13.8h, v1.h[5] \n" "fmla v18.8h, v13.8h, v2.h[5] \n" "fmla v19.8h, v13.8h, v3.h[5] \n" "fmla v16.8h, v14.8h, v0.h[6] \n" "fmla v17.8h, v14.8h, v1.h[6] \n" "fmla v18.8h, v14.8h, v2.h[6] \n" "fmla v19.8h, v14.8h, v3.h[6] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v15.8h, v0.h[7] \n" "fmla v17.8h, v15.8h, v1.h[7] \n" "fmla v18.8h, v15.8h, v2.h[7] \n" "fmla v19.8h, v15.8h, v3.h[7] \n" "bne 0b \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 
: "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i + 1 < tiles; i += 2) { const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r01 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v17.8h, v8.8h, v1.h[0] \n" "fmla v16.8h, v9.8h, v0.h[1] \n" "fmla v17.8h, v9.8h, v1.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v10.8h, v0.h[2] \n" "fmla v17.8h, v10.8h, v1.h[2] \n" "fmla v16.8h, v11.8h, v0.h[3] \n" "fmla v17.8h, v11.8h, v1.h[3] \n" "fmla v16.8h, v12.8h, v0.h[4] \n" "fmla v17.8h, v12.8h, v1.h[4] \n" "fmla v16.8h, v13.8h, v0.h[5] \n" "fmla v17.8h, v13.8h, v1.h[5] \n" "fmla v16.8h, v14.8h, v0.h[6] \n" "fmla v17.8h, v14.8h, v1.h[6] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v15.8h, v0.h[7] \n" "fmla v17.8h, v15.8h, v1.h[7] \n" "bne 0b \n" "st1 {v16.8h, v17.8h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.8h}, [%2], #16 \n" // r0 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, 
[%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v16.8h, v9.8h, v0.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v10.8h, v0.h[2] \n" "fmla v16.8h, v11.8h, v0.h[3] \n" "fmla v16.8h, v12.8h, v0.h[4] \n" "fmla v16.8h, v13.8h, v0.h[5] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v14.8h, v0.h[6] \n" "fmla v16.8h, v15.8h, v0.h[7] \n" "bne 0b \n" "st1 {v16.8h}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16"); } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; float16x8_t _bias0 = bias ? 
vld1q_f16((const __fp16*)bias + p * 8) : vdupq_n_f16(0.f); __fp16 tmp[6][8][8]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * 8; const __fp16* output0_tm_1 = output0_tm_0 + tiles * 8; const __fp16* output0_tm_2 = output0_tm_0 + tiles * 16; const __fp16* output0_tm_3 = output0_tm_0 + tiles * 24; const __fp16* output0_tm_4 = output0_tm_0 + tiles * 32; const __fp16* output0_tm_5 = output0_tm_0 + tiles * 40; const __fp16* output0_tm_6 = output0_tm_0 + tiles * 48; const __fp16* output0_tm_7 = output0_tm_0 + tiles * 56; __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * 8; // TODO neon optimize for (int m = 0; m < 8; m++) { float16x8_t _out0tm0 = vld1q_f16(output0_tm_0); float16x8_t _out0tm1 = vld1q_f16(output0_tm_1); float16x8_t _out0tm2 = vld1q_f16(output0_tm_2); float16x8_t _out0tm3 = vld1q_f16(output0_tm_3); float16x8_t _out0tm4 = vld1q_f16(output0_tm_4); float16x8_t _out0tm5 = vld1q_f16(output0_tm_5); float16x8_t _out0tm6 = vld1q_f16(output0_tm_6); float16x8_t _out0tm7 = vld1q_f16(output0_tm_7); float16x8_t _tmp024a = vaddq_f16(_out0tm1, _out0tm2); float16x8_t _tmp135a = vsubq_f16(_out0tm1, _out0tm2); // float tmp024a = output0_tm[1] + output0_tm[2]; // float tmp135a = output0_tm[1] - output0_tm[2]; float16x8_t _tmp024b = vaddq_f16(_out0tm3, _out0tm4); float16x8_t _tmp135b = vsubq_f16(_out0tm3, _out0tm4); // float tmp024b = output0_tm[3] + output0_tm[4]; // float tmp135b = output0_tm[3] - output0_tm[4]; float16x8_t _tmp024c = vaddq_f16(_out0tm5, _out0tm6); float16x8_t _tmp135c = vsubq_f16(_out0tm5, _out0tm6); // float tmp024c = output0_tm[5] + output0_tm[6]; // float tmp135c = output0_tm[5] - output0_tm[6]; float16x8_t _tmp0m = vaddq_f16(vaddq_f16(_out0tm0, _tmp024a), vfmaq_n_f16(_tmp024b, _tmp024c, 32.f)); float16x8_t _tmp2m = vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); 
float16x8_t _tmp4m = vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); vst1q_f16(tmp[0][m], _tmp0m); vst1q_f16(tmp[2][m], _tmp2m); vst1q_f16(tmp[4][m], _tmp4m); // tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; // tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; // tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; float16x8_t _tmp1m = vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f); float16x8_t _tmp3m = vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f); float16x8_t _tmp5m = vaddq_f16(vaddq_f16(_out0tm7, _tmp135a), vfmaq_n_f16(_tmp135c, _tmp135b, 32.f)); vst1q_f16(tmp[1][m], _tmp1m); vst1q_f16(tmp[3][m], _tmp3m); vst1q_f16(tmp[5][m], _tmp5m); // tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; // tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; // tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 64; output0_tm_1 += tiles * 64; output0_tm_2 += tiles * 64; output0_tm_3 += tiles * 64; output0_tm_4 += tiles * 64; output0_tm_5 += tiles * 64; output0_tm_6 += tiles * 64; output0_tm_7 += tiles * 64; } for (int m = 0; m < 6; m++) { float16x8_t _tmp00 = vld1q_f16(tmp[m][0]); float16x8_t _tmp01 = vld1q_f16(tmp[m][1]); float16x8_t _tmp02 = vld1q_f16(tmp[m][2]); float16x8_t _tmp03 = vld1q_f16(tmp[m][3]); float16x8_t _tmp04 = vld1q_f16(tmp[m][4]); float16x8_t _tmp05 = vld1q_f16(tmp[m][5]); float16x8_t _tmp06 = vld1q_f16(tmp[m][6]); float16x8_t _tmp07 = vld1q_f16(tmp[m][7]); float16x8_t _tmp024a = vaddq_f16(_tmp01, _tmp02); float16x8_t _tmp135a = vsubq_f16(_tmp01, _tmp02); // float tmp024a = tmp0[1] + tmp0[2]; // float tmp135a = tmp0[1] - tmp0[2]; float16x8_t _tmp024b = vaddq_f16(_tmp03, _tmp04); float16x8_t _tmp135b = vsubq_f16(_tmp03, _tmp04); // float tmp024b = tmp0[3] + tmp0[4]; // float tmp135b = tmp0[3] - tmp0[4]; float16x8_t _tmp024c = vaddq_f16(_tmp05, _tmp06); float16x8_t _tmp135c = vsubq_f16(_tmp05, _tmp06); // float tmp024c = tmp0[5] + tmp0[6]; // float 
tmp135c = tmp0[5] - tmp0[6]; float16x8_t _out00 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp00, _tmp024a), vfmaq_n_f16(_tmp024b, _tmp024c, 32.f))); float16x8_t _out02 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f)); float16x8_t _out04 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f)); vst1q_f16(output0, _out00); vst1q_f16(output0 + 16, _out02); vst1q_f16(output0 + 32, _out04); // output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; // output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; // output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; float16x8_t _out01 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f)); float16x8_t _out03 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f)); float16x8_t _out05 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp07, _tmp135a), vfmaq_n_f16(_tmp135c, _tmp135b, 32.f))); vst1q_f16(output0 + 8, _out01); vst1q_f16(output0 + 24, _out03); vst1q_f16(output0 + 40, _out05); // output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; // output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; // output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw * 8; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s2_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = (w - 2 * outw + w) * 8; const __fp16* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); float16x8_t _bias0 = bias ? 
vld1q_f16(bias + p * 8) : vdupq_n_f16(0.f); out0.fill(_bias0); for (int q = 0; q < inch; q++) { __fp16* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const __fp16* r0 = img0.row<const __fp16>(0); const __fp16* r1 = img0.row<const __fp16>(1); const __fp16* r2 = img0.row<const __fp16>(2); const __fp16* kptr = kernel.channel(p).row<const __fp16>(q); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%0, #512] \n" "ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0 "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%1], #64 \n" // r04 r05 r06 r07 "fmla v28.8h, v16.8h, v0.h[0] \n" "fmla v29.8h, v16.8h, v2.h[0] \n" "fmla v30.8h, v16.8h, v4.h[0] \n" "fmla v31.8h, v16.8h, v6.h[0] \n" "fmla v28.8h, v17.8h, v0.h[1] \n" "fmla v29.8h, v17.8h, v2.h[1] \n" "fmla v30.8h, v17.8h, v4.h[1] \n" "fmla v31.8h, v17.8h, v6.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v28.8h, v18.8h, v0.h[2] \n" "fmla v29.8h, v18.8h, v2.h[2] \n" "fmla v30.8h, v18.8h, v4.h[2] \n" "fmla v31.8h, v18.8h, v6.h[2] \n" "fmla v28.8h, v19.8h, v0.h[3] \n" "fmla v29.8h, v19.8h, v2.h[3] \n" "fmla v30.8h, v19.8h, v4.h[3] \n" "fmla v31.8h, v19.8h, v6.h[3] \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v20.8h, v2.h[4] \n" "fmla v30.8h, v20.8h, v4.h[4] \n" "fmla v31.8h, v20.8h, v6.h[4] \n" "fmla v28.8h, v21.8h, v0.h[5] \n" "fmla v29.8h, v21.8h, v2.h[5] \n" "fmla v30.8h, v21.8h, v4.h[5] \n" "fmla v31.8h, v21.8h, v6.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v28.8h, v22.8h, v0.h[6] \n" "fmla v29.8h, v22.8h, v2.h[6] \n" "fmla v30.8h, v22.8h, v4.h[6] \n" "fmla v31.8h, v22.8h, v6.h[6] \n" "fmla v28.8h, v23.8h, v0.h[7] \n" "fmla v29.8h, 
v23.8h, v2.h[7] \n" "fmla v30.8h, v23.8h, v4.h[7] \n" "fmla v31.8h, v23.8h, v6.h[7] \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v16.8h, v3.h[0] \n" "fmla v30.8h, v16.8h, v5.h[0] \n" "fmla v31.8h, v16.8h, v7.h[0] \n" "fmla v28.8h, v17.8h, v1.h[1] \n" "fmla v29.8h, v17.8h, v3.h[1] \n" "fmla v30.8h, v17.8h, v5.h[1] \n" "fmla v31.8h, v17.8h, v7.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v28.8h, v18.8h, v1.h[2] \n" "fmla v29.8h, v18.8h, v3.h[2] \n" "fmla v30.8h, v18.8h, v5.h[2] \n" "fmla v31.8h, v18.8h, v7.h[2] \n" "fmla v28.8h, v19.8h, v1.h[3] \n" "fmla v29.8h, v19.8h, v3.h[3] \n" "fmla v30.8h, v19.8h, v5.h[3] \n" "fmla v31.8h, v19.8h, v7.h[3] \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v20.8h, v3.h[4] \n" "fmla v30.8h, v20.8h, v5.h[4] \n" "fmla v31.8h, v20.8h, v7.h[4] \n" "fmla v28.8h, v21.8h, v1.h[5] \n" "fmla v29.8h, v21.8h, v3.h[5] \n" "fmla v30.8h, v21.8h, v5.h[5] \n" "fmla v31.8h, v21.8h, v7.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v28.8h, v22.8h, v1.h[6] \n" "fmla v29.8h, v22.8h, v3.h[6] \n" "fmla v30.8h, v22.8h, v5.h[6] \n" "fmla v31.8h, v22.8h, v7.h[6] \n" "fmla v28.8h, v23.8h, v1.h[7] \n" "fmla v29.8h, v23.8h, v3.h[7] \n" "fmla v30.8h, v23.8h, v5.h[7] \n" "fmla v31.8h, v23.8h, v7.h[7] \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.8h}, [%1] \n" // r08 "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v16.8h, v4.h[0] \n" "fmla v30.8h, v16.8h, v6.h[0] \n" "fmla v31.8h, v16.8h, v0.h[0] \n" "fmla v28.8h, v17.8h, v2.h[1] \n" "fmla v29.8h, v17.8h, v4.h[1] \n" "fmla v30.8h, v17.8h, v6.h[1] \n" "fmla v31.8h, v17.8h, v0.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v28.8h, v18.8h, v2.h[2] \n" "fmla v29.8h, v18.8h, v4.h[2] \n" "fmla v30.8h, v18.8h, v6.h[2] \n" "fmla v31.8h, v18.8h, v0.h[2] \n" "fmla v28.8h, v19.8h, v2.h[3] \n" "fmla v29.8h, v19.8h, v4.h[3] \n" "fmla v30.8h, v19.8h, v6.h[3] 
\n" "fmla v31.8h, v19.8h, v0.h[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v20.8h, v4.h[4] \n" "fmla v30.8h, v20.8h, v6.h[4] \n" "fmla v31.8h, v20.8h, v0.h[4] \n" "fmla v28.8h, v21.8h, v2.h[5] \n" "fmla v29.8h, v21.8h, v4.h[5] \n" "fmla v30.8h, v21.8h, v6.h[5] \n" "fmla v31.8h, v21.8h, v0.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v28.8h, v22.8h, v2.h[6] \n" "fmla v29.8h, v22.8h, v4.h[6] \n" "fmla v30.8h, v22.8h, v6.h[6] \n" "fmla v31.8h, v22.8h, v0.h[6] \n" "fmla v28.8h, v23.8h, v2.h[7] \n" "fmla v29.8h, v23.8h, v4.h[7] \n" "fmla v30.8h, v23.8h, v6.h[7] \n" "fmla v31.8h, v23.8h, v0.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%2], #64 \n" // r14 r15 r16 r17 "fmla v28.8h, v16.8h, v8.h[0] \n" "fmla v29.8h, v16.8h, v10.h[0] \n" "fmla v30.8h, v16.8h, v12.h[0] \n" "fmla v31.8h, v16.8h, v14.h[0] \n" "fmla v28.8h, v17.8h, v8.h[1] \n" "fmla v29.8h, v17.8h, v10.h[1] \n" "fmla v30.8h, v17.8h, v12.h[1] \n" "fmla v31.8h, v17.8h, v14.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v28.8h, v18.8h, v8.h[2] \n" "fmla v29.8h, v18.8h, v10.h[2] \n" "fmla v30.8h, v18.8h, v12.h[2] \n" "fmla v31.8h, v18.8h, v14.h[2] \n" "fmla v28.8h, v19.8h, v8.h[3] \n" "fmla v29.8h, v19.8h, v10.h[3] \n" "fmla v30.8h, v19.8h, v12.h[3] \n" "fmla v31.8h, v19.8h, v14.h[3] \n" "fmla v28.8h, v20.8h, v8.h[4] \n" "fmla v29.8h, v20.8h, v10.h[4] \n" "fmla v30.8h, v20.8h, v12.h[4] \n" "fmla v31.8h, v20.8h, v14.h[4] \n" "fmla v28.8h, v21.8h, v8.h[5] \n" "fmla v29.8h, v21.8h, v10.h[5] \n" "fmla v30.8h, v21.8h, v12.h[5] \n" "fmla v31.8h, v21.8h, v14.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v28.8h, v22.8h, v8.h[6] \n" "fmla v29.8h, v22.8h, v10.h[6] \n" "fmla v30.8h, v22.8h, v12.h[6] \n" "fmla v31.8h, 
v22.8h, v14.h[6] \n" "fmla v28.8h, v23.8h, v8.h[7] \n" "fmla v29.8h, v23.8h, v10.h[7] \n" "fmla v30.8h, v23.8h, v12.h[7] \n" "fmla v31.8h, v23.8h, v14.h[7] \n" "fmla v28.8h, v16.8h, v9.h[0] \n" "fmla v29.8h, v16.8h, v11.h[0] \n" "fmla v30.8h, v16.8h, v13.h[0] \n" "fmla v31.8h, v16.8h, v15.h[0] \n" "fmla v28.8h, v17.8h, v9.h[1] \n" "fmla v29.8h, v17.8h, v11.h[1] \n" "fmla v30.8h, v17.8h, v13.h[1] \n" "fmla v31.8h, v17.8h, v15.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v28.8h, v18.8h, v9.h[2] \n" "fmla v29.8h, v18.8h, v11.h[2] \n" "fmla v30.8h, v18.8h, v13.h[2] \n" "fmla v31.8h, v18.8h, v15.h[2] \n" "fmla v28.8h, v19.8h, v9.h[3] \n" "fmla v29.8h, v19.8h, v11.h[3] \n" "fmla v30.8h, v19.8h, v13.h[3] \n" "fmla v31.8h, v19.8h, v15.h[3] \n" "fmla v28.8h, v20.8h, v9.h[4] \n" "fmla v29.8h, v20.8h, v11.h[4] \n" "fmla v30.8h, v20.8h, v13.h[4] \n" "fmla v31.8h, v20.8h, v15.h[4] \n" "fmla v28.8h, v21.8h, v9.h[5] \n" "fmla v29.8h, v21.8h, v11.h[5] \n" "fmla v30.8h, v21.8h, v13.h[5] \n" "fmla v31.8h, v21.8h, v15.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v28.8h, v22.8h, v9.h[6] \n" "fmla v29.8h, v22.8h, v11.h[6] \n" "fmla v30.8h, v22.8h, v13.h[6] \n" "fmla v31.8h, v22.8h, v15.h[6] \n" "fmla v28.8h, v23.8h, v9.h[7] \n" "fmla v29.8h, v23.8h, v11.h[7] \n" "fmla v30.8h, v23.8h, v13.h[7] \n" "fmla v31.8h, v23.8h, v15.h[7] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v8.8h}, [%2] \n" // r18 "fmla v28.8h, v16.8h, v10.h[0] \n" "fmla v29.8h, v16.8h, v12.h[0] \n" "fmla v30.8h, v16.8h, v14.h[0] \n" "fmla v31.8h, v16.8h, v8.h[0] \n" "fmla v28.8h, v17.8h, v10.h[1] \n" "fmla v29.8h, v17.8h, v12.h[1] \n" "fmla v30.8h, v17.8h, v14.h[1] \n" "fmla v31.8h, v17.8h, v8.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v28.8h, v18.8h, v10.h[2] \n" "fmla v29.8h, v18.8h, v12.h[2] \n" "fmla v30.8h, v18.8h, v14.h[2] \n" "fmla v31.8h, v18.8h, 
v8.h[2] \n" "fmla v28.8h, v19.8h, v10.h[3] \n" "fmla v29.8h, v19.8h, v12.h[3] \n" "fmla v30.8h, v19.8h, v14.h[3] \n" "fmla v31.8h, v19.8h, v8.h[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v28.8h, v20.8h, v10.h[4] \n" "fmla v29.8h, v20.8h, v12.h[4] \n" "fmla v30.8h, v20.8h, v14.h[4] \n" "fmla v31.8h, v20.8h, v8.h[4] \n" "fmla v28.8h, v21.8h, v10.h[5] \n" "fmla v29.8h, v21.8h, v12.h[5] \n" "fmla v30.8h, v21.8h, v14.h[5] \n" "fmla v31.8h, v21.8h, v8.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v28.8h, v22.8h, v10.h[6] \n" "fmla v29.8h, v22.8h, v12.h[6] \n" "fmla v30.8h, v22.8h, v14.h[6] \n" "fmla v31.8h, v22.8h, v8.h[6] \n" "fmla v28.8h, v23.8h, v10.h[7] \n" "fmla v29.8h, v23.8h, v12.h[7] \n" "fmla v30.8h, v23.8h, v14.h[7] \n" "fmla v31.8h, v23.8h, v8.h[7] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" // r24 r25 r26 r27 "fmla v28.8h, v16.8h, v0.h[0] \n" "fmla v29.8h, v16.8h, v2.h[0] \n" "fmla v30.8h, v16.8h, v4.h[0] \n" "fmla v31.8h, v16.8h, v6.h[0] \n" "fmla v28.8h, v17.8h, v0.h[1] \n" "fmla v29.8h, v17.8h, v2.h[1] \n" "fmla v30.8h, v17.8h, v4.h[1] \n" "fmla v31.8h, v17.8h, v6.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v28.8h, v18.8h, v0.h[2] \n" "fmla v29.8h, v18.8h, v2.h[2] \n" "fmla v30.8h, v18.8h, v4.h[2] \n" "fmla v31.8h, v18.8h, v6.h[2] \n" "fmla v28.8h, v19.8h, v0.h[3] \n" "fmla v29.8h, v19.8h, v2.h[3] \n" "fmla v30.8h, v19.8h, v4.h[3] \n" "fmla v31.8h, v19.8h, v6.h[3] \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v20.8h, v2.h[4] \n" "fmla v30.8h, v20.8h, v4.h[4] \n" "fmla v31.8h, v20.8h, v6.h[4] \n" "fmla v28.8h, v21.8h, v0.h[5] \n" "fmla v29.8h, v21.8h, v2.h[5] \n" "fmla v30.8h, v21.8h, v4.h[5] \n" "fmla v31.8h, v21.8h, v6.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v28.8h, v22.8h, 
v0.h[6] \n" "fmla v29.8h, v22.8h, v2.h[6] \n" "fmla v30.8h, v22.8h, v4.h[6] \n" "fmla v31.8h, v22.8h, v6.h[6] \n" "fmla v28.8h, v23.8h, v0.h[7] \n" "fmla v29.8h, v23.8h, v2.h[7] \n" "fmla v30.8h, v23.8h, v4.h[7] \n" "fmla v31.8h, v23.8h, v6.h[7] \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v16.8h, v3.h[0] \n" "fmla v30.8h, v16.8h, v5.h[0] \n" "fmla v31.8h, v16.8h, v7.h[0] \n" "fmla v28.8h, v17.8h, v1.h[1] \n" "fmla v29.8h, v17.8h, v3.h[1] \n" "fmla v30.8h, v17.8h, v5.h[1] \n" "fmla v31.8h, v17.8h, v7.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v28.8h, v18.8h, v1.h[2] \n" "fmla v29.8h, v18.8h, v3.h[2] \n" "fmla v30.8h, v18.8h, v5.h[2] \n" "fmla v31.8h, v18.8h, v7.h[2] \n" "fmla v28.8h, v19.8h, v1.h[3] \n" "fmla v29.8h, v19.8h, v3.h[3] \n" "fmla v30.8h, v19.8h, v5.h[3] \n" "fmla v31.8h, v19.8h, v7.h[3] \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v20.8h, v3.h[4] \n" "fmla v30.8h, v20.8h, v5.h[4] \n" "fmla v31.8h, v20.8h, v7.h[4] \n" "fmla v28.8h, v21.8h, v1.h[5] \n" "fmla v29.8h, v21.8h, v3.h[5] \n" "fmla v30.8h, v21.8h, v5.h[5] \n" "fmla v31.8h, v21.8h, v7.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v28.8h, v22.8h, v1.h[6] \n" "fmla v29.8h, v22.8h, v3.h[6] \n" "fmla v30.8h, v22.8h, v5.h[6] \n" "fmla v31.8h, v22.8h, v7.h[6] \n" "fmla v28.8h, v23.8h, v1.h[7] \n" "fmla v29.8h, v23.8h, v3.h[7] \n" "fmla v30.8h, v23.8h, v5.h[7] \n" "fmla v31.8h, v23.8h, v7.h[7] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.8h}, [%3] \n" // r28 "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v16.8h, v4.h[0] \n" "fmla v30.8h, v16.8h, v6.h[0] \n" "fmla v31.8h, v16.8h, v0.h[0] \n" "fmla v28.8h, v17.8h, v2.h[1] \n" "fmla v29.8h, v17.8h, v4.h[1] \n" "fmla v30.8h, v17.8h, v6.h[1] \n" "fmla v31.8h, v17.8h, v0.h[1] \n" // "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n" "fmla v28.8h, v18.8h, v2.h[2] \n" "fmla v29.8h, v18.8h, v4.h[2] \n" "fmla 
v30.8h, v18.8h, v6.h[2] \n" "fmla v31.8h, v18.8h, v0.h[2] \n" "fmla v28.8h, v19.8h, v2.h[3] \n" "fmla v29.8h, v19.8h, v4.h[3] \n" "fmla v30.8h, v19.8h, v6.h[3] \n" "fmla v31.8h, v19.8h, v0.h[3] \n" "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v20.8h, v4.h[4] \n" "fmla v30.8h, v20.8h, v6.h[4] \n" "fmla v31.8h, v20.8h, v0.h[4] \n" "fmla v28.8h, v21.8h, v2.h[5] \n" "fmla v29.8h, v21.8h, v4.h[5] \n" "fmla v30.8h, v21.8h, v6.h[5] \n" "fmla v31.8h, v21.8h, v0.h[5] \n" "fmla v28.8h, v22.8h, v2.h[6] \n" "fmla v29.8h, v22.8h, v4.h[6] \n" "fmla v30.8h, v22.8h, v6.h[6] \n" "fmla v31.8h, v22.8h, v0.h[6] \n" "fmla v28.8h, v23.8h, v2.h[7] \n" "fmla v29.8h, v23.8h, v4.h[7] \n" "fmla v30.8h, v23.8h, v6.h[7] \n" "fmla v31.8h, v23.8h, v0.h[7] \n" "sub %4, %4, #1088 \n" // kptr -= 8.5 * 64; "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%0, #256] \n" "ld1 {v30.8h, v31.8h}, [%0] \n" // sum0 "fmul v28.8h, v16.8h, v0.h[0] \n" "fmul v29.8h, v16.8h, v2.h[0] \n" "fmla v30.8h, v17.8h, v0.h[1] \n" "fmla v31.8h, v17.8h, v2.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v28.8h, v18.8h, v0.h[2] \n" "fmla v29.8h, v18.8h, v2.h[2] \n" "fmla v30.8h, v19.8h, v0.h[3] \n" "fmla v31.8h, v19.8h, v2.h[3] \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v20.8h, v2.h[4] \n" "fmla v30.8h, v21.8h, v0.h[5] \n" "fmla v31.8h, v21.8h, v2.h[5] \n" "prfm pldl1keep, [%4, 
#512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v28.8h, v22.8h, v0.h[6] \n" "fmla v29.8h, v22.8h, v2.h[6] \n" "fmla v30.8h, v23.8h, v0.h[7] \n" "fmla v31.8h, v23.8h, v2.h[7] \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v16.8h, v3.h[0] \n" "fmla v30.8h, v17.8h, v1.h[1] \n" "fmla v31.8h, v17.8h, v3.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v28.8h, v18.8h, v1.h[2] \n" "fmla v29.8h, v18.8h, v3.h[2] \n" "fmla v30.8h, v19.8h, v1.h[3] \n" "fmla v31.8h, v19.8h, v3.h[3] \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.8h}, [%1] \n" // r04 "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v20.8h, v3.h[4] \n" "fmla v30.8h, v21.8h, v1.h[5] \n" "fmla v31.8h, v21.8h, v3.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v28.8h, v22.8h, v1.h[6] \n" "fmla v29.8h, v22.8h, v3.h[6] \n" "fmla v30.8h, v23.8h, v1.h[7] \n" "fmla v31.8h, v23.8h, v3.h[7] \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v16.8h, v0.h[0] \n" "fmla v30.8h, v17.8h, v2.h[1] \n" "fmla v31.8h, v17.8h, v0.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v28.8h, v18.8h, v2.h[2] \n" "fmla v29.8h, v18.8h, v0.h[2] \n" "fmla v30.8h, v19.8h, v2.h[3] \n" "fmla v31.8h, v19.8h, v0.h[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v20.8h, v0.h[4] \n" "fmla v30.8h, v21.8h, v2.h[5] \n" "fmla v31.8h, v21.8h, v0.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v28.8h, v22.8h, v2.h[6] \n" "fmla v29.8h, v22.8h, v0.h[6] \n" "fmla v30.8h, v23.8h, v2.h[7] \n" "fmla v31.8h, v23.8h, v0.h[7] \n" "fmla v28.8h, v16.8h, v4.h[0] \n" "fmla v29.8h, v16.8h, v6.h[0] \n" "fmla v30.8h, v17.8h, v4.h[1] \n" "fmla v31.8h, v17.8h, v6.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, 
[%4], #64 \n" "fmla v28.8h, v18.8h, v4.h[2] \n" "fmla v29.8h, v18.8h, v6.h[2] \n" "fmla v30.8h, v19.8h, v4.h[3] \n" "fmla v31.8h, v19.8h, v6.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v6.h[4] \n" "fmla v30.8h, v21.8h, v4.h[5] \n" "fmla v31.8h, v21.8h, v6.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v28.8h, v22.8h, v4.h[6] \n" "fmla v29.8h, v22.8h, v6.h[6] \n" "fmla v30.8h, v23.8h, v4.h[7] \n" "fmla v31.8h, v23.8h, v6.h[7] \n" "fmla v28.8h, v16.8h, v5.h[0] \n" "fmla v29.8h, v16.8h, v7.h[0] \n" "fmla v30.8h, v17.8h, v5.h[1] \n" "fmla v31.8h, v17.8h, v7.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v28.8h, v18.8h, v5.h[2] \n" "fmla v29.8h, v18.8h, v7.h[2] \n" "fmla v30.8h, v19.8h, v5.h[3] \n" "fmla v31.8h, v19.8h, v7.h[3] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v4.8h}, [%2] \n" // r14 "fmla v28.8h, v20.8h, v5.h[4] \n" "fmla v29.8h, v20.8h, v7.h[4] \n" "fmla v30.8h, v21.8h, v5.h[5] \n" "fmla v31.8h, v21.8h, v7.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v28.8h, v22.8h, v5.h[6] \n" "fmla v29.8h, v22.8h, v7.h[6] \n" "fmla v30.8h, v23.8h, v5.h[7] \n" "fmla v31.8h, v23.8h, v7.h[7] \n" "fmla v28.8h, v16.8h, v6.h[0] \n" "fmla v29.8h, v16.8h, v4.h[0] \n" "fmla v30.8h, v17.8h, v6.h[1] \n" "fmla v31.8h, v17.8h, v4.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v28.8h, v18.8h, v6.h[2] \n" "fmla v29.8h, v18.8h, v4.h[2] \n" "fmla v30.8h, v19.8h, v6.h[3] \n" "fmla v31.8h, v19.8h, v4.h[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v28.8h, v20.8h, v6.h[4] \n" "fmla v29.8h, v20.8h, v4.h[4] \n" "fmla v30.8h, v21.8h, v6.h[5] \n" "fmla v31.8h, v21.8h, v4.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v28.8h, v22.8h, v6.h[6] \n" 
"fmla v29.8h, v22.8h, v4.h[6] \n" "fmla v30.8h, v23.8h, v6.h[7] \n" "fmla v31.8h, v23.8h, v4.h[7] \n" "fmla v28.8h, v16.8h, v0.h[0] \n" "fmla v29.8h, v16.8h, v2.h[0] \n" "fmla v30.8h, v17.8h, v0.h[1] \n" "fmla v31.8h, v17.8h, v2.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v28.8h, v18.8h, v0.h[2] \n" "fmla v29.8h, v18.8h, v2.h[2] \n" "fmla v30.8h, v19.8h, v0.h[3] \n" "fmla v31.8h, v19.8h, v2.h[3] \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v20.8h, v2.h[4] \n" "fmla v30.8h, v21.8h, v0.h[5] \n" "fmla v31.8h, v21.8h, v2.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v28.8h, v22.8h, v0.h[6] \n" "fmla v29.8h, v22.8h, v2.h[6] \n" "fmla v30.8h, v23.8h, v0.h[7] \n" "fmla v31.8h, v23.8h, v2.h[7] \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v16.8h, v3.h[0] \n" "fmla v30.8h, v17.8h, v1.h[1] \n" "fmla v31.8h, v17.8h, v3.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v28.8h, v18.8h, v1.h[2] \n" "fmla v29.8h, v18.8h, v3.h[2] \n" "fmla v30.8h, v19.8h, v1.h[3] \n" "fmla v31.8h, v19.8h, v3.h[3] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.8h}, [%3] \n" // r24 "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v20.8h, v3.h[4] \n" "fmla v30.8h, v21.8h, v1.h[5] \n" "fmla v31.8h, v21.8h, v3.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v28.8h, v22.8h, v1.h[6] \n" "fmla v29.8h, v22.8h, v3.h[6] \n" "fmla v30.8h, v23.8h, v1.h[7] \n" "fmla v31.8h, v23.8h, v3.h[7] \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v16.8h, v0.h[0] \n" "fmla v30.8h, v17.8h, v2.h[1] \n" "fmla v31.8h, v17.8h, v0.h[1] \n" // "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n" "fmla v28.8h, v18.8h, v2.h[2] \n" "fmla v29.8h, v18.8h, v0.h[2] \n" "fmla v30.8h, v19.8h, v2.h[3] \n" "fmla v31.8h, v19.8h, v0.h[3] \n" "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, 
v20.8h, v0.h[4] \n" "fmla v30.8h, v21.8h, v2.h[5] \n" "fmla v31.8h, v21.8h, v0.h[5] \n" "fmla v28.8h, v22.8h, v2.h[6] \n" "fmla v29.8h, v22.8h, v0.h[6] \n" "fmla v30.8h, v23.8h, v2.h[7] \n" "fmla v31.8h, v23.8h, v0.h[7] \n" "fadd v28.8h, v28.8h, v30.8h \n" "fadd v29.8h, v29.8h, v31.8h \n" "sub %4, %4, #1088 \n" // kptr -= 8.5 * 64; "st1 {v28.8h, v29.8h}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%1, #384] \n" "ld1 {v0.8h, v1.8h, v2.8h}, [%1] \n" // r00 r01 r02 "prfm pldl1keep, [%0, #128] \n" "ld1 {v31.8h}, [%0] \n" // sum0 "fmul v28.8h, v16.8h, v0.h[0] \n" "fmul v29.8h, v17.8h, v0.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmul v30.8h, v18.8h, v0.h[2] \n" "fmla v31.8h, v19.8h, v0.h[3] \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v21.8h, v0.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v30.8h, v22.8h, v0.h[6] \n" "fmla v31.8h, v23.8h, v0.h[7] \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v17.8h, v1.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v30.8h, v18.8h, v1.h[2] \n" "fmla v31.8h, v19.8h, v1.h[3] \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v21.8h, v1.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v30.8h, v22.8h, v1.h[6] \n" "fmla v31.8h, v23.8h, v1.h[7] \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v17.8h, v2.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v30.8h, v18.8h, v2.h[2] 
\n" "fmla v31.8h, v19.8h, v2.h[3] \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v3.8h, v4.8h, v5.8h}, [%2] \n" // r10 r11 r12 "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v21.8h, v2.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v30.8h, v22.8h, v2.h[6] \n" "fmla v31.8h, v23.8h, v2.h[7] \n" "fmla v28.8h, v16.8h, v3.h[0] \n" "fmla v29.8h, v17.8h, v3.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v30.8h, v18.8h, v3.h[2] \n" "fmla v31.8h, v19.8h, v3.h[3] \n" "fmla v28.8h, v20.8h, v3.h[4] \n" "fmla v29.8h, v21.8h, v3.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v30.8h, v22.8h, v3.h[6] \n" "fmla v31.8h, v23.8h, v3.h[7] \n" "fmla v28.8h, v16.8h, v4.h[0] \n" "fmla v29.8h, v17.8h, v4.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v30.8h, v18.8h, v4.h[2] \n" "fmla v31.8h, v19.8h, v4.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v21.8h, v4.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v30.8h, v22.8h, v4.h[6] \n" "fmla v31.8h, v23.8h, v4.h[7] \n" "fmla v28.8h, v16.8h, v5.h[0] \n" "fmla v29.8h, v17.8h, v5.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v30.8h, v18.8h, v5.h[2] \n" "fmla v31.8h, v19.8h, v5.h[3] \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v0.8h, v1.8h, v2.8h}, [%3] \n" // r20 r21 r22 "fmla v28.8h, v20.8h, v5.h[4] \n" "fmla v29.8h, v21.8h, v5.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v30.8h, v22.8h, v5.h[6] \n" "fmla v31.8h, v23.8h, v5.h[7] \n" "fmla v28.8h, v16.8h, v0.h[0] \n" "fmla v29.8h, v17.8h, v0.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v30.8h, v18.8h, v0.h[2] \n" "fmla v31.8h, v19.8h, v0.h[3] \n" "fmla v28.8h, v20.8h, 
v0.h[4] \n" "fmla v29.8h, v21.8h, v0.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v30.8h, v22.8h, v0.h[6] \n" "fmla v31.8h, v23.8h, v0.h[7] \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v17.8h, v1.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v30.8h, v18.8h, v1.h[2] \n" "fmla v31.8h, v19.8h, v1.h[3] \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v21.8h, v1.h[5] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "fmla v30.8h, v22.8h, v1.h[6] \n" "fmla v31.8h, v23.8h, v1.h[7] \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v17.8h, v2.h[1] \n" // "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n" "fmla v30.8h, v18.8h, v2.h[2] \n" "fmla v31.8h, v19.8h, v2.h[3] \n" "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v21.8h, v2.h[5] \n" "add %1, %1, #32 \n" "fmla v30.8h, v22.8h, v2.h[6] \n" "fmla v31.8h, v23.8h, v2.h[7] \n" "add %2, %2, #32 \n" "fadd v28.8h, v28.8h, v29.8h \n" "fadd v30.8h, v30.8h, v31.8h \n" "add %3, %3, #32 \n" "fadd v28.8h, v28.8h, v30.8h \n" "sub %4, %4, #1088 \n" // kptr -= 8.5 * 64; "st1 {v28.8h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31"); } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } } }
terrain.c
#include "blocko.h"

// Heightmaps: hmap is the raw midpoint-displacement surface, hmap2 is the
// smoothed/terraced version that gen_chunk() actually reads.
float hmap[TILESW][TILESD];
float hmap2[TILESW][TILESD];
int tscootx, tscootz, tchunk_scootx, tchunk_scootz;

// Recursively fill hmap over the inclusive rectangle [x0,x2]x[z0,z2] by
// midpoint displacement.  A stored height of 0 means "not yet set".
// NOTE(review): the RAND*/SEED macros presumably read and advance the local
// `seed` so each rectangle is generated deterministically — confirm in blocko.h.
void gen_hmap(int x0, int x2, int z0, int z2)
{
    unsigned seed = SEED4(x0, x2, z0, z2);

    // pick corners if they aren't set
    if (hmap[x0][z0] == 0) hmap[x0][z0] = RANDI(64, 127);
    if (hmap[x0][z2] == 0) hmap[x0][z2] = RANDI(64, 127);
    if (hmap[x2][z0] == 0) hmap[x2][z0] = RANDI(64, 127);
    if (hmap[x2][z2] == 0) hmap[x2][z2] = RANDI(64, 127);

    int x1 = (x0 + x2) / 2;
    int z1 = (z0 + z2) / 2;
    int w = (x2 - x0) / 4;
    int d = (z2 - z0) / 4;
    w = w ? w : 1;
    d = d ? d : 1;
    float d2 = d / 2.f;
    float r = w > 2 ? 1.f : 0.f;  // no random jitter once the square is small

    // edge middles: average of the two adjacent corners plus jitter
    if (!hmap[x0][z1]) hmap[x0][z1] = (hmap[x0][z0] + hmap[x0][z2]) / 2.f + r * RANDF(-d2, d2);
    if (!hmap[x2][z1]) hmap[x2][z1] = (hmap[x2][z0] + hmap[x2][z2]) / 2.f + r * RANDF(-d2, d2);
    if (!hmap[x1][z0]) hmap[x1][z0] = (hmap[x0][z0] + hmap[x2][z0]) / 2.f + r * RANDF(-d2, d2);
    if (!hmap[x1][z2]) hmap[x1][z2] = (hmap[x0][z2] + hmap[x2][z2]) / 2.f + r * RANDF(-d2, d2);

    // middle middle: average of the four edge midpoints plus larger jitter
    hmap[x1][z1] = (hmap[x0][z1] + hmap[x2][z1] + hmap[x1][z0] + hmap[x1][z2]) / 4.f + r * RANDF(-d, d);

    // recurse if there are any unfilled spots
    if (x1 - x0 > 1 || x2 - x1 > 1 || z1 - z0 > 1 || z2 - z1 > 1)
    {
        gen_hmap(x0, x1, z0, z1);
        gen_hmap(x0, x1, z1, z2);
        gen_hmap(x1, x2, z0, z1);
        gen_hmap(x1, x2, z1, z2);
    }
}

// Produce hmap2 from hmap: box-blur with a noise-chosen radius, then flatten
// shorelines ("beaches") and raise noise-selected areas toward a plateau
// around height 102.
void smooth_hmap()
{
    for (int x = 0; x < TILESW; x++) for (int z = 0; z < TILESD; z++)
    {
        // blur radius 1..3 picked per-column from low-frequency noise
        float p365 = noise(x, 0, -z, 365);
        int radius = p365 < 0.0f ? 3 :
                     p365 < 0.2f ? 2 : 1;
        int x0 = x - radius;
        int x1 = x + radius + 1;
        int z0 = z - radius;
        int z1 = z + radius + 1;
        CLAMP(x0, 0, TILESW-1);
        CLAMP(x1, 0, TILESW-1);
        CLAMP(z0, 0, TILESD-1);
        CLAMP(z1, 0, TILESD-1);

        // box average of the (clamped) neighborhood
        int sum = 0, n = 0;
        for (int i = x0; i < x1; i++) for (int j = z0; j < z1; j++)
        {
            sum += hmap[i][j];
            n++;
        }
        int res = sum / n;

        float p800 = noise(x, 0, z, 800);
        float p777 = noise(z, 0, x, 777);
        float p301 = noise(x, 0, z, 301);
        float p204 = noise(x, 0, z, 204);
        float p33 = noise(x, 0, z, 32 * (1.1 + p301));
        float swoosh = p33 > 0.3 ? (10 - 30 * (p33 - 0.3)) : 0;

        // beach threshold varies per column, but always lands in [90,100]
        float times = (p204 * 20.f) + 30.f;
        float plus = (-p204 * 40.f) + 60.f;
        CLAMP(times, 20.f, 40.f);
        CLAMP(plus, 40.f, 80.f);
        int beach_ht = (1.f - p777) * times + plus;
        CLAMP(beach_ht, 90, 100);

        if (res > beach_ht) // beaches: compress terrain above the shoreline
        {
            if (res > beach_ht + 21)
                res -= 18;
            else
                res = ((res - beach_ht) / 7) + beach_ht;
        }

        // noise-selected regions blend up toward a plateau at height 102
        // (101 where `swoosh` carves a lip)
        float s = (1 + p204) * 0.2;
        if (p800 > 0.0 + s)
        {
            float t = (p800 - 0.0 - s) * 10;
            CLAMP(t, 0.f, 1.f);
            res = lerp(t, res, 102);
            if (res == 102 && swoosh) res = 101;
        }

        hmap2[x][z] = res < TILESH - 1 ? res : TILESH - 1;
    }
}

// Build the whole heightmap: run midpoint displacement on an 8x8 grid of
// sub-rectangles, then smooth the result into hmap2.
void create_hmap()
{
    // generate in pieces
    for (int i = 0; i < 8; i++) for (int j = 0; j < 8; j++)
    {
        int x0 = (i ) * TILESW / 8;
        int x1 = (i+1) * TILESW / 8;
        int z0 = (j ) * TILESD / 8;
        int z1 = (j+1) * TILESD / 8;
        CLAMP(x1, 0, TILESW-1);
        CLAMP(z1, 0, TILESD-1);
        gen_hmap(x0, x1, z0, z1);
    }
    smooth_hmap();
}

// Fill the tile volume for the chunk columns [xlo,xhi)x[zlo,zhi): base
// terrain from hmap2 + 3D noise, bezier cave carving, floating-water
// containment, trees, then ground height + initial sunlight.
void gen_chunk(int xlo, int xhi, int zlo, int zhi)
{
    CLAMP(xlo, 0, TILESW-1);
    CLAMP(xhi, 0, TILESW-1);
    CLAMP(zlo, 0, TILESD-1);
    CLAMP(zhi, 0, TILESD-1);

    // columns are generated at most once, even though neighboring chunk
    // requests overlap by one column on each side
    static char column_already_generated[TILESW][TILESD];

    int x;
    #pragma omp parallel for
    for (x = xlo; x < xhi; x++) for (int z = zlo; z < zhi; z++)
    {
        if (x == xlo && z == zlo)
            omp_threads = omp_get_num_threads();

        if (column_already_generated[x][z])
            continue;

        column_already_generated[x][z] = true;

        float p1080 = noise(x, 0, -z, 1080);
        float p530 = noise(z, 0, x, 530);
        float p630 = noise(-z, 0, x, 629);
        float p200 = noise(x, 0, z, 200);
        float p80 = noise(x, 0, z, 80);
        float p15 = noise(z, 0, -x, 15);
        //float p5 = noise(-x, 0, z, 5);

        // squash terrain toward height 100 in noise-selected flat areas
        if (p200 > 0.2f)
        {
            float flatten = (p200 - 0.2f) * 80;
            CLAMP(flatten, 1, 12);
            hmap2[x][z] -= 100;
            hmap2[x][z] /= flatten;
            hmap2[x][z] += 100;
        }

        int solid_depth = 0;
        int slicey_bit = false;
        int plateau_bit = false;
        int mode = p1080 > 0 ? 1 : 10;  // 10 = stretched "plateau" biome

        for (int y = 0; y < TILESH; y++)
        {
            // bottom layer is always unbreakable
            if (y == TILESH - 1)
            {
                TT_(x, y, z) = HARD;
                continue;
            }

            float p300 = noise(x, y, z, 300);
            float p32 = noise(x, y*mode, z, 16 + 16 * (1.1 + p300));
            float plat = p32 > 0.3 ? (10 - 30 * (p32 * p32 * p32 - 0.3)) : 0;
            float p90 = noise(x, y, z, 90);
            float p91 = noise(x+1000, y+1000, z+1000, 91);
            float p42 = noise(x, y*(p300 + 1), z, 42);
            float p9 = noise(x, y*0.05, z, 9);
            float p2 = noise(-z, y, x, 2);

            if (p300 + fabsf(p80) * 0.25 + p15 * 0.125 < -0.5)
            {
                plat = -plat;
            }
            else if (p300 < 0.5)
            {
                plat = 0;
            }

            int cave = (p90 < -0.24 || p91 < -0.24) && (p42 > 0.5 && p9 < 0.4);

            // thin "slicey" strata near the surface
            if (y > hmap2[x][z] - ((p80 + 1) * 20) && p90 > 0.4 && p91 > 0.4
                    && p42 > 0.01 && p42 < 0.09 && p300 > 0.3)
                slicey_bit = true;

            int platted = y < hmap2[x][z] + plat * (mode * 0.125f + 0.875f);

            if ((cave || platted) && !plateau_bit)
            {
                unsigned seed = SEED2(x, z);
                if (!slicey_bit || RANDP(5))
                {
                    int type = (y > 100 && hmap2[x][z] > 99) ? WATR : OPEN; //only allow water below low heightmap
                    TT_(x, y, z) = type;
                    solid_depth = 0;
                    slicey_bit = false;
                    goto out;
                }
            }
            else
            {
                if (mode == 10 && plat && !cave && y < hmap2[x][z])
                    plateau_bit = true;
                slicey_bit = false;
            }

            // solid tile: pick material by depth and strata noise
            solid_depth++;
            float p16 = noise(x, y, z, 16);
            int slv = 76 + p530 * 20;
            int dlv = 86 + p630 * 20;
            int ore = p2 > 0.4f ? ORE : OREH;
            int ston = p42 > 0.4f && p9 < -0.3f ? ore : STON;

            if (slicey_bit)
                TT_(x, y, z) = p9 > 0.4f ? HARD : SAND;
            else if (solid_depth > 14 + 5 * p9)
                TT_(x, y, z) = GRAN;
            else if (y < slv - 5 * p16)
                TT_(x, y, z) = ston;
            else if (y < dlv - 5 * p16)
                TT_(x, y, z) = p80 > (-solid_depth * 0.1f) ? DIRT : OPEN; // erosion
            else if (y < 100 - 5 * p16)
                TT_(x, y, z) = solid_depth == 1 ? GRAS : DIRT;
            else if (y < 120)
                TT_(x, y, z) = solid_depth < 4 + 5 * p9 ? SAND : ston;
            else
                TT_(x, y, z) = HARD;

            out: ;
        }
    }

    // find nearby bezier curvy caves
    #define REGW (CHUNKW*16)
    #define REGD (CHUNKD*16)
    // find region ,-- have to add 1 bc we're overdrawing chunks
    // lower bound /
    int rxlo = (int)((xlo+1) / REGW) * REGW;
    int rzlo = (int)((zlo+1) / REGD) * REGD;
    unsigned seed = SEED2(rxlo, rzlo);
    // find region center
    int rxcenter = rxlo + REGW/2;
    int rzcenter = rzlo + REGD/2;
    struct point PC = (struct point){rxcenter, TILESH - RANDI(1, 25), rzcenter};
    struct point P0;
    struct point P1;
    struct point P2;
    struct point P3 = PC;
    int nr_caves = RANDI(0, 100);
    // cave system stretchiness
    int sx = RANDI(10, 60);
    int sy = RANDI(10, 60);
    int sz = RANDI(10, 60);
    #define MAX_CAVE_POINTS 10000
    #define QCAVE(x,y,z,radius_sq) ((struct qcave){x, y, z, radius_sq})
    struct qcave cave_points[MAX_CAVE_POINTS];
    int cave_p_len = 0;

    for (int i = 0; i < nr_caves; i++)
    {
        // random walk from center of region, or end of last curve
        P0 = RANDP(33) ? PC : P3;
        P1 = (struct point){P0.x + RANDI(-sx, sx), P0.y + RANDI(-sy, sy), P0.z + RANDI(-sz, sz)};
        P2 = (struct point){P1.x + RANDI(-sx, sx), P1.y + RANDI(-sy, sy), P1.z + RANDI(-sz, sz)};
        P3 = (struct point){P2.x + RANDI(-sx, sx), P2.y + RANDI(-sy, sy), P2.z + RANDI(-sz, sz)};

        // walk the cubic bezier P0..P3, widening/narrowing the tube radius
        float root_radius = 0.f, delta = 0.f;
        for (float t = 0.f; t <= 1.f; t += 0.001f)
        {
            if (cave_p_len >= MAX_CAVE_POINTS) break;
            if (root_radius == 0.f || RANDP(0.002f))
            {
                root_radius = RAND01;
                delta = RANDF(-0.001f, 0.001f);
            }
            root_radius += delta;
            float radius_sq = root_radius * root_radius * root_radius * root_radius * 50.f;
            CLAMP(radius_sq, 1.f, 50.f);
            float s = 1.f - t;
            int x = (int)(s*s*s*P0.x + 3.f*t*s*s*P1.x + 3.f*t*t*s*P2.x + t*t*t*P3.x);
            int y = (int)(s*s*s*P0.y + 3.f*t*s*s*P1.y + 3.f*t*t*s*P2.y + t*t*t*P3.y);
            int z = (int)(s*s*s*P0.z + 3.f*t*s*s*P1.z + 3.f*t*t*s*P2.z + t*t*t*P3.z);
            // TODO: don't store duplicate cave points?
            // FIX: y is a *height*, so bound it by TILESH (was TILESD, the
            // world depth — a typo that mis-clipped cave points vertically)
            if (x >= xlo && x <= xhi && y >= 0 && y <= TILESH - 1 && z >= zlo && z <= zhi)
                cave_points[cave_p_len++] = QCAVE(x, y, z, radius_sq);
        }
    }

    // carve caves
    #pragma omp parallel for
    for (x = xlo; x < xhi; x++) for (int z = zlo; z < zhi; z++) for (int y = 0; y < TILESH-2; y++)
        for (int i = 0; i < cave_p_len; i++)
        {
            int dist_sq = DIST_SQ(cave_points[i].x - x, cave_points[i].y - y, cave_points[i].z - z);
            if (dist_sq <= cave_points[i].radius_sq)
            {
                TT_(x, y, z) = OPEN;
                break;
            }
        }

    // correcting pass over middle, contain floating water
    #pragma omp parallel for
    for (x = xlo+1; x < xhi-1; x++) for (int z = zlo+1; z < zhi-1; z++) for (int y = 100; y < TILESH-2; y++)
    {
        if (TT_(x, y, z) == WATR)
        {
            if (TT_(x , y , z-1) == OPEN ||
                TT_(x , y , z+1) == OPEN ||
                TT_(x-1, y , z ) == OPEN ||
                TT_(x+1, y , z ) == OPEN ||
                TT_(x , y+1, z ) == OPEN)
                TT_(x, y, z) = WOOD;
        }
    }

    // trees?
    float p191 = noise(zlo, 0, xlo, 191);
    seed = SEED2(xlo, zlo);
    if (p191 > 0.2f) while (RANDP(95))
    {
        char leaves = RANDBOOL ? RLEF : YLEF;
        float radius = RANDF(1.f, 4.f);
        int x = xlo + CHUNKW/2 + RANDI(-5, 5);
        int z = zlo + CHUNKD/2 + RANDI(-5, 5);
        // scan down for a grass/dirt surface to plant on
        for (int y = 10; y < TILESH-2; y++)
        {
            if (TT_(x, y, z) == OPEN) continue;
            if (TT_(x, y, z) != GRAS && TT_(x, y, z) != DIRT) break;

            // trunk; NOTE: RANDI in the condition re-rolls every iteration
            int yy = y;
            for (; yy >= y - RANDI(3, 8); yy--)
                TT_(x, yy, z) = WOOD;

            // spherical-ish leaf blob around the trunk top
            int ymax = yy + RANDI(2, 4);
            for (int i = x-3; i <= x+3; i++) for (int j = yy-3; j <= ymax; j++) for (int k = z-3; k <= z+3; k++)
            {
                float dist = (i-x) * (i-x) + (j-yy) * (j-yy) + (k-z) * (k-z);
                if (TT_(i, j, k) == OPEN && dist < radius * radius)
                    TT_(i, j, k) = leaves;
            }
            break;
        }
    }

    // cleanup gndheight and set initial lighting
    #pragma omp parallel for
    for (x = xlo+1; x < xhi-1; x++) for (int z = zlo+1; z < zhi-1; z++)
    {
        int above_ground = true;
        int light_level = 15;  // full sunlight until the first opaque tile
        int wet = false;
        for (int y = 0; y < TILESH-1; y++)
        {
            if (above_ground && IS_OPAQUE(x, y, z))
            {
                TGNDH_(x, z) = y;
                above_ground = false;
                if (y)
                {
                    TSUN_(x, y-1, z) = 0;
                    sun_enqueue(x, y-1, z, 0, light_level);
                }
                light_level = 0;
            }
            // water spreads downward into open tiles and dims light
            if (wet && TT_(x, y, z) == OPEN) TT_(x, y, z) = WATR;
            if (wet && IS_SOLID(x, y, z)) wet = false;
            if (TT_(x, y, z) == WATR)
            {
                wet = true;
                if (light_level) light_level--;
                if (light_level) light_level--;
            }
            TSUN_(x, y, z) = light_level;
        }
    }

    recalc_corner_lighting(xlo, xhi, zlo, zhi);
}

// update terrain worker thread(s) copies of scoot vars
void terrain_apply_scoot()
{
    #pragma omp critical
    {
        tscootx = future_scootx * CHUNKW;
        tscootz = future_scootz * CHUNKD;
        tchunk_scootx = future_scootx;
        tchunk_scootz = future_scootz;
    }
}

// on its own thread, loops forever building chunks when needed
void chunk_builder()
{
    for (;;)
    {
        terrain_apply_scoot();

        int best_x = 0, best_z = 0;
        int px = (player[0].pos.x / BS + CHUNKW2) / CHUNKW;
        int pz = (player[0].pos.z / BS + CHUNKD2) / CHUNKD;
        CLAMP(px, 0, VAOW-1);
        CLAMP(pz, 0, VAOD-1);

        // find nearest ungenerated chunk
        int best_dist = 99999999;
        for (int x = 0; x < VAOW; x++) for (int z = 0; z < VAOD; z++)
        {
            if (TAGEN_(x, z)) continue;
            int dist_sq = (x - px) * (x - px) + (z - pz) * (z - pz);
            if (dist_sq < best_dist)
            {
                best_dist = dist_sq;
                best_x = x;
                best_z = z;
            }
        }

        // nothing left to generate: idle briefly and poll again
        if (best_dist == 99999999)
        {
            SDL_Delay(1);
            continue;
        }

        int xlo = best_x * CHUNKW;
        int zlo = best_z * CHUNKD;
        int xhi = xlo + CHUNKW;
        int zhi = zlo + CHUNKD;

        // overdraw by one column on each side so neighbors stitch correctly
        int ticks_before = SDL_GetTicks();
        gen_chunk(xlo-1, xhi+1, zlo-1, zhi+1);
        nr_chunks_generated++;
        chunk_gen_ticks += SDL_GetTicks() - ticks_before;
        TAGEN_(best_x, best_z) = true;

        #pragma omp critical
        {
            just_generated[just_gen_len].x = best_x;
            just_generated[just_gen_len].z = best_z;
            just_gen_len++;
        }
    }
}
myOmp.c
/* Minimal OpenMP "hello world": forks the default thread team and each
 * thread prints its own id.  Output interleaving/order is nondeterministic. */
#include <stdio.h>
#include <omp.h>

int main(int argc, char *argv[])
{
    /* each thread in the team executes this block once */
    #pragma omp parallel
    {
        int ID = omp_get_thread_num(); /* 0 .. omp_get_num_threads()-1 */
        printf("Hello World!! %d\n", ID);
    }
    return 0;
}
parallel_for_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp=libiomp5 -verify %s

// Clang -verify diagnostics test for '#pragma omp parallel for'.
// CAUTION: the 'expected-error/warning/note' comments are consumed by the
// verifier and the '@+N' offsets are line-relative — do not insert lines
// between a directive comment and the line it points at.

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for'}}
#pragma omp parallel for

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for'}}
#pragma omp parallel for foo

// a bare directive on a for loop is fine; anything else is an error
void test_no_clause() {
  int i;
#pragma omp parallel for
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{statement after '#pragma omp parallel for' must be a for loop}}
#pragma omp parallel for
  ++i;
}

// labels/returns may not cross the OpenMP region boundary in either direction
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;
  int x[24];

#pragma omp parallel for
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

// junk tokens after the directive are warned about and ignored
void test_invalid_clause() {
  int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for foo bar
  for (i = 0; i < 16; ++i)
    ;
}

// non-identifier/malformed clause tokens
void test_non_identifiers() {
  int i, x;

// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for;
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp parallel for'}}
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for linear(x);
  for (i = 0; i < 16; ++i)
    ;

// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for private(x);
  for (i = 0; i < 16; ++i)
    ;

// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for, private(x);
  for (i = 0; i < 16; ++i)
    ;
}

extern int foo();

// malformed and out-of-range arguments to the 'collapse' clause
void test_collapse() {
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp parallel for collapse
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for collapse(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for collapse()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for collapse(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for collapse(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel for collapse 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
#pragma omp parallel for collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp parallel for collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp parallel for collapse(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp parallel for collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}

// malformed and valid 'private' clause argument lists
void test_private() {
  int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for private(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for private(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for private(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for private()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for private(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for private(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel for private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}

// malformed and valid 'lastprivate' clause argument lists
void test_lastprivate() {
  int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for lastprivate(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for lastprivate()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel for lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

// malformed 'firstprivate' lists, plus valid firstprivate+lastprivate combos
void test_firstprivate() {
  int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for firstprivate(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for firstprivate()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel for lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

// loop iteration variables must be integer or pointer typed
void test_loop_messages() {
  float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel for
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel for
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}
graphStats.c
// ----------------------------------------------------------------------------- // // "00_AccelGraph" // // ----------------------------------------------------------------------------- // Copyright (c) 2014-2019 All rights reserved // ----------------------------------------------------------------------------- // Author : Abdullah Mughrabi/http://www.martinbroadhurst.com/levenshtein-distance-in-c.html // Email : atmughra@ncsu.edu||atmughrabi@gmail.com // File : graphStats.c // Create : 2019-06-21 17:15:17 // Revise : 2019-09-28 15:37:12 // Editor : Abdullah Mughrabi // ----------------------------------------------------------------------------- #include <ctype.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <argp.h> #include <stdbool.h> #include <omp.h> #include <string.h> #include <math.h> #include <stdint.h> #include <assert.h> #include "fixedPoint.h" #include "timer.h" #include "myMalloc.h" #include "graphConfig.h" #include "graphCSR.h" #include "pageRank.h" #include "graphStats.h" static int min3(int a, int b, int c) { if (a < b && a < c) { return a; } if (b < a && b < c) { return b; } return c; } static uint32_t levenshtein_matrix_calculate(edit **mat, const uint32_t *array1, uint32_t len1, const uint32_t *array2, uint32_t len2) { uint32_t i, j; for (j = 1; j <= len2; j++) { for (i = 1; i <= len1; i++) { uint32_t substitution_cost; uint32_t del = 0, ins = 0, subst = 0; uint32_t best; if (array1[i - 1] == array2[j - 1]) { substitution_cost = 0; } else { substitution_cost = 1; } del = mat[i - 1][j].score + 1; /* deletion */ ins = mat[i][j - 1].score + 1; /* insertion */ subst = mat[i - 1][j - 1].score + substitution_cost; /* substitution */ best = min3(del, ins, subst); mat[i][j].score = best; mat[i][j].arg1 = array1[i - 1]; mat[i][j].arg2 = array2[j - 1]; mat[i][j].pos = i - 1; if (best == del) { mat[i][j].type = DELETION; mat[i][j].prev = &mat[i - 1][j]; } else if (best == ins) { mat[i][j].type = INSERTION; mat[i][j].prev = &mat[i][j 
- 1]; } else { if (substitution_cost > 0) { mat[i][j].type = SUBSTITUTION; } else { mat[i][j].type = NONE; } mat[i][j].prev = &mat[i - 1][j - 1]; } } } return mat[len1][len2].score; } static edit **levenshtein_matrix_create(const uint32_t *array1, uint32_t len1, const uint32_t *array2, uint32_t len2) { uint32_t i, j; edit **mat = malloc((len1 + 1) * sizeof(edit *)); if (mat == NULL) { return NULL; } for (i = 0; i <= len1; i++) { mat[i] = malloc((len2 + 1) * sizeof(edit)); if (mat[i] == NULL) { for (j = 0; j < i; j++) { free(mat[j]); } free(mat); return NULL; } } for (i = 0; i <= len1; i++) { mat[i][0].score = i; mat[i][0].prev = NULL; mat[i][0].arg1 = 0; mat[i][0].arg2 = 0; } for (j = 0; j <= len2; j++) { mat[0][j].score = j; mat[0][j].prev = NULL; mat[0][j].arg1 = 0; mat[0][j].arg2 = 0; } return mat; } uint32_t levenshtein_distance(const uint32_t *array1, const uint32_t len1, const uint32_t *array2, const uint32_t len2, edit **script) { uint32_t i, distance; edit **mat, *head; /* If either string is empty, the distance is the other string's length */ if (len1 == 0) { return len2; } if (len2 == 0) { return len1; } /* Initialise the matrix */ mat = levenshtein_matrix_create(array1, len1, array2, len2); if (!mat) { *script = NULL; return 0; } /* Main algorithm */ distance = levenshtein_matrix_calculate(mat, array1, len1, array2, len2); /* Read back the edit script */ *script = malloc(distance * sizeof(edit)); if (*script) { i = distance - 1; for (head = &mat[len1][len2]; head->prev != NULL; head = head->prev) { if (head->type != NONE) { memcpy(*script + i, head, sizeof(edit)); i--; } } } else { distance = 0; } /* Clean up */ for (i = 0; i <= len1; i++) { free(mat[i]); } free(mat); return distance; } void print(const edit *e) { if (e->type == INSERTION) { printf("Insert %u", e->arg2); } else if (e->type == DELETION) { printf("Delete %u", e->arg1); } else { printf("Substitute %u for %u", e->arg2, e->arg1); } printf(" at %u\n", e->pos); } 
/*-------------------------------------------------------------------------*/ /* Sorts in place, returns the bubble sort distance between the input array * and the sorted array. */ static int insertionSort(float *arr, int len) { int maxJ, i, j, swapCount = 0; /* printf("enter insertionSort len=%d\n",len) ; */ if(len < 2) { return 0; } maxJ = len - 1; for(i = len - 2; i >= 0; --i) { float val = arr[i]; for(j = i; j < maxJ && arr[j + 1] < val; ++j) { arr[j] = arr[j + 1]; } arr[j] = val; swapCount += (j - i); } return swapCount; } /*-------------------------------------------------------------------------*/ static int merge(float *from, float *to, int middle, int len) { int bufIndex, leftLen, rightLen, swaps ; float *left, *right; /* printf("enter merge\n") ; */ bufIndex = 0; swaps = 0; left = from; right = from + middle; rightLen = len - middle; leftLen = middle; while(leftLen && rightLen) { if(right[0] < left[0]) { to[bufIndex] = right[0]; swaps += leftLen; rightLen--; right++; } else { to[bufIndex] = left[0]; leftLen--; left++; } bufIndex++; } if(leftLen) { #pragma omp critical (MEMCPY) memcpy(to + bufIndex, left, leftLen * sizeof(float)); } else if(rightLen) { #pragma omp critical (MEMCPY) memcpy(to + bufIndex, right, rightLen * sizeof(float)); } return swaps; } /*-------------------------------------------------------------------------*/ /* Sorts in place, returns the bubble sort distance between the input array * and the sorted array. 
*/ static int mergeSort(float *x, float *buf, int len) { int swaps, half ; /* printf("enter mergeSort\n") ; */ if(len < 10) { return insertionSort(x, len); } swaps = 0; if(len < 2) { return 0; } half = len / 2; swaps += mergeSort(x, buf, half); swaps += mergeSort(x + half, buf + half, len - half); swaps += merge(x, buf, half, len); #pragma omp critical (MEMCPY) memcpy(x, buf, len * sizeof(float)); return swaps; } /*-------------------------------------------------------------------------*/ static int getMs(float *data, int len) /* Assumes data is sorted */ { int Ms = 0, tieCount = 0, i ; /* printf("enter getMs\n") ; */ for(i = 1; i < len; i++) { if(data[i] == data[i - 1]) { tieCount++; } else if(tieCount) { Ms += (tieCount * (tieCount + 1)) / 2; tieCount = 0; } } if(tieCount) { Ms += (tieCount * (tieCount + 1)) / 2; } return Ms; } /*-------------------------------------------------------------------------*/ /* This function calculates the Kendall correlation tau_b. * The arrays arr1 should be sorted before this call, and arr2 should be * re-ordered in lockstep. This can be done by calling * qsort_floatfloat(len,arr1,arr2) * for example. * Note also that arr1 and arr2 will be modified, so if they need to * be preserved, do so before calling this function. 
*/ float kendallNlogN( float *arr1, float *arr2, int len ) { int m1 = 0, m2 = 0, tieCount, swapCount, nPair, s, i ; float cor ; /* printf("enter kendallNlogN\n") ; */ if( len < 2 ) return (float)0 ; nPair = len * (len - 1) / 2; s = nPair; tieCount = 0; for(i = 1; i < len; i++) { if(arr1[i - 1] == arr1[i]) { tieCount++; } else if(tieCount > 0) { insertionSort(arr2 + i - tieCount - 1, tieCount + 1); m1 += tieCount * (tieCount + 1) / 2; s += getMs(arr2 + i - tieCount - 1, tieCount + 1); tieCount = 0; } } if(tieCount > 0) { insertionSort(arr2 + i - tieCount - 1, tieCount + 1); m1 += tieCount * (tieCount + 1) / 2; s += getMs(arr2 + i - tieCount - 1, tieCount + 1); } swapCount = mergeSort(arr2, arr1, len); m2 = getMs(arr2, len); s -= (m1 + m2) + 2 * swapCount; if( m1 < nPair && m2 < nPair ) cor = s / ( sqrtf((float)(nPair - m1)) * sqrtf((float)(nPair - m2)) ) ; else cor = 0.0f ; return cor ; } /*-------------------------------------------------------------------------*/ /* This function uses a simple O(N^2) implementation. It probably has a * smaller constant and therefore is useful in the small N case, and is also * useful for testing the relatively complex O(N log N) implementation. 
*/ float kendallSmallN( float *arr1, float *arr2, int len ) { int m1 = 0, m2 = 0, s = 0, nPair, i, j ; float cor ; /* printf("enter kendallSmallN\n") ; */ for(i = 0; i < len; i++) { for(j = i + 1; j < len; j++) { if(arr2[i] > arr2[j]) { if (arr1[i] > arr1[j]) { s++; } else if(arr1[i] < arr1[j]) { s--; } else { m1++; } } else if(arr2[i] < arr2[j]) { if (arr1[i] > arr1[j]) { s--; } else if(arr1[i] < arr1[j]) { s++; } else { m1++; } } else { m2++; if(arr1[i] == arr1[j]) { m1++; } } } } nPair = len * (len - 1) / 2; if( m1 < nPair && m2 < nPair ) cor = s / ( sqrtf((float)(nPair - m1)) * sqrtf((float)(nPair - m2)) ) ; else cor = 0.0f ; return cor ; } void rvereseArray(uint32_t *arr, uint32_t start, uint32_t end) { while (start < end) { int temp = arr[start]; arr[start] = arr[end]; arr[end] = temp; start++; end--; } } uint32_t levenshtein_distance_topK(uint32_t *array1, uint32_t *array2, uint32_t size_k) { edit *script; uint32_t distance; distance = levenshtein_distance(array1, size_k, array2, size_k, &script); free(script); return distance; } uint32_t avg_mismatch_ranks_real_topK(uint32_t *array1, uint32_t *array2, uint32_t *array3, uint32_t size_k, uint32_t topk) { uint32_t v; uint32_t mismatch = 0; if(topk > size_k) topk = size_k; for(v = size_k - topk; v < size_k; v++) { if(array2[array3[v]] != array1[array3[v]]) mismatch++; } return mismatch ; } double avg_error_ranks_real_topK(uint32_t *array1, uint32_t *array2, uint32_t *array3, uint32_t size_k, uint32_t topk) { uint32_t v; double error = 0.0f; if(topk > size_k) topk = size_k; for(v = size_k - topk; v < size_k; v++) { error += abs(array2[array3[v]] - array1[array3[v]])/((double)array1[array3[v]]+1); // printf("rank%d v%d rv_ref%d rv_cmp%d diff%d err%lf \n", v, array/3[v], array1[array3[v]], array2[array3[v]], abs(array2[array3[v]] - array1[array3[v]]), error); } return error / topk; } double avg_error_ranks_float_topK(float *array1, float *array2, uint32_t *array3, uint32_t size_k, uint32_t topk) { uint32_t v; 
    double error = 0.0f;
    if(topk > size_k)
        topk = size_k;
    for(v = size_k - topk; v < size_k; v++)
    {
        // if(array1[v] > 0.0f)
        if((double)array1[array3[v]] > 0.0f)
            error += fabs((double)array2[array3[v]] - (double)array1[array3[v]])/(double)array1[array3[v]];
        // printf("rank%d v%d rv_ref%lf rv_cmp%lf diff%lf err%.22lf \n", v, array3[v], array1[array3[v]], array2[array3[v]], fabs(array2[array3[v]] - array1[array3[v]]), error);
    }
    return error / topk;
}

/**
 * @brief Counts how many of the reference top-k vertices also land in the
 *        compared top-k. array1 holds the reference rank order, array2
 *        maps vertex -> compared rank; a vertex is in the compared top-k
 *        when its rank is >= size_k - topk.
 */
uint32_t intersection_topK(uint32_t *array1, uint32_t *array2, uint32_t size_k, uint32_t topk)
{
    uint32_t v;
    uint32_t intersection = 0;
    if(topk > size_k)
        topk = size_k;
    for(v = size_k - topk; v < size_k; v++)
    {
        // printf("%d %d %d %d\n",v,array1[v], array2[array1[v]], size_k-topk);
        if(array2[array1[v]] >= size_k - topk)
            intersection++;
    }
    return intersection;
}

/**
 * @brief Computes all top-k correlation metrics (Levenshtein distance,
 *        Kendall tau on values and on ranks, top-k intersection, rank
 *        mismatch count, average value/rank errors) between a reference
 *        PageRank run and a compared run, optionally printing them to
 *        stdout and to fptr when verbose > 0.
 *
 * The realRanks arrays are assumed sorted so that the top-ranked vertices
 * occupy the last topk slots -- TODO confirm against pageRank.c.
 */
struct PageRankCorrelationStats collectStatsPageRank_topK(struct PageRankStats *ref_stats, struct PageRankStats *stats, uint32_t *ref_rankedVertices_total, uint32_t *ref_rankedVertices_inverse, uint32_t *rankedVertices_inverse, uint32_t topk, uint32_t num_vertices, FILE *fptr, uint32_t verbose)
{
    uint32_t v;
    uint32_t u;
    struct PageRankCorrelationStats pageRankCorrelationStats;
    if(topk > num_vertices)
        topk = num_vertices;
    /* Working copies of the top-k slice: vertex ids, PageRank values and
     * rank positions (as floats, for the Kendall routines which sort in
     * place and clobber their inputs). */
    uint32_t *rankedVertices = (uint32_t *) my_malloc(topk * sizeof(uint32_t));
    uint32_t *ref_rankedVertices = (uint32_t *) my_malloc(topk * sizeof(uint32_t));
    float *rankedVerticesfloat = (float *) my_malloc(topk * sizeof(float));
    float *ref_rankedVerticesfloat = (float *) my_malloc(topk * sizeof(float));
    float *rankedVerticesReal = (float *) my_malloc(topk * sizeof(float));
    float *ref_rankedVerticesReal = (float *) my_malloc(topk * sizeof(float));
    uint32_t levenshtein_distance = 0;
    float float_Kendall = 0.0f;
    float real_Kendall = 0.0f;
    uint32_t intersection = 0;
    uint32_t mismatch = 0;
    double avg_error_float = 0.0f;
    double avg_error_relative = 0.0f;
    for(u = 0, v = (num_vertices - topk); v < num_vertices; v++, u++)
    {
        rankedVertices[u] = stats->realRanks[v];
        rankedVerticesfloat[u] = stats->pageRanks[stats->realRanks[v]];
        rankedVerticesReal[u] = (float)stats->realRanks[v] / (float)1.0;
    }
    for(u = 0, v = (num_vertices - topk); v < num_vertices; v++, u++)
    {
        ref_rankedVertices[u] = ref_stats->realRanks[v];
        /* NOTE(review): indexes ref pageRanks with stats->realRanks (not
         * ref_stats->realRanks). May be intentional (compare values for
         * the same vertices) -- confirm with the author. */
        ref_rankedVerticesfloat[u] = ref_stats->pageRanks[stats->realRanks[v]];
        ref_rankedVerticesReal[u] = (float)ref_stats->realRanks[v] / (float)1.0;
    }
    levenshtein_distance = levenshtein_distance_topK(ref_rankedVertices, rankedVertices, topk);
    float_Kendall = kendallSmallN(ref_rankedVerticesfloat, rankedVerticesfloat, topk);
    real_Kendall = kendallSmallN(ref_rankedVerticesReal, rankedVerticesReal, topk);
    intersection = intersection_topK(ref_rankedVertices_total, rankedVertices_inverse, num_vertices, topk);
    avg_error_float = avg_error_ranks_float_topK(ref_stats->pageRanks, stats->pageRanks, ref_stats->realRanks, num_vertices, topk);
    avg_error_relative = avg_error_ranks_real_topK(ref_rankedVertices_inverse, rankedVertices_inverse, ref_stats->realRanks, num_vertices, topk);
    mismatch = avg_mismatch_ranks_real_topK(ref_rankedVertices_inverse, rankedVertices_inverse, ref_stats->realRanks, num_vertices, topk);
    if(verbose > 0)
    {
        fprintf(stdout, "\n-----------------------------------------------------\n");
        fprintf(stdout, "topk: %u \n", topk);
        fprintf(stdout, "-----------------------------------------------------\n");
        fprintf(stdout, "levenshtein_distance: %u \n", levenshtein_distance);
        fprintf(stdout, "Rank float Kendall: %lf\n", float_Kendall);
        fprintf(stdout, "Rank real Kendall: %lf\n", real_Kendall);
        fprintf(stdout, "intersection: %u \n", intersection);
        fprintf(stdout, "mismatch: %u \n", mismatch);
        fprintf(stdout, "avg_error_float: %.22lf\n", avg_error_float);
        fprintf(stdout, "avg_error_relative: %.22lf\n", avg_error_relative);
        fprintf(stdout, "-----------------------------------------------------\n");
        if(fptr)
        {
            fprintf(fptr, "\n-----------------------------------------------------\n");
            fprintf(fptr, "topk: %u \n", topk);
            fprintf(fptr, "-----------------------------------------------------\n");
            fprintf(fptr, "levenshtein_distance: %u \n", levenshtein_distance);
            fprintf(fptr, "Rank float Kendall: %lf\n", float_Kendall);
            fprintf(fptr, "Rank real Kendall: %lf\n", real_Kendall);
            fprintf(fptr, "intersection: %u \n", intersection);
            fprintf(fptr, "mismatch: %u \n", mismatch);
            fprintf(fptr, "avg_error_float: %lf\n", avg_error_float);
            fprintf(fptr, "avg_error_relative: %lf\n", avg_error_relative);
            fprintf(fptr, "-----------------------------------------------------\n");
        }
    }
    pageRankCorrelationStats.levenshtein_distance = levenshtein_distance;
    pageRankCorrelationStats.float_Kendall = float_Kendall;
    pageRankCorrelationStats.real_Kendall = real_Kendall;
    pageRankCorrelationStats.intersection = intersection;
    pageRankCorrelationStats.mismatch = mismatch;
    pageRankCorrelationStats.avg_error_float = avg_error_float;
    pageRankCorrelationStats.avg_error_relative = avg_error_relative;
    free(rankedVertices);
    free(ref_rankedVertices);
    free(rankedVerticesfloat);
    free(ref_rankedVerticesfloat);
    free(rankedVerticesReal);
    free(ref_rankedVerticesReal);
    return pageRankCorrelationStats;
}

/**
 * @brief Top-level driver: compares a PageRank run against a reference
 *        run over several top-k sizes and binned chunks, printing a
 *        report to stdout and appending it to a per-trial ".stats" file.
 */
void collectStatsPageRank( struct Arguments *arguments, struct PageRankStats *ref_stats, struct PageRankStats *stats, uint32_t trial)
{
    uint32_t v;
    uint32_t topk;
    uint32_t x;
    uint32_t chunk_x;
    uint32_t chunk_num;
    /* vertex -> rank lookup tables for both runs, plus the reference rank
     * order itself. */
    uint32_t *rankedVertices_inverse = (uint32_t *) my_malloc(ref_stats->num_vertices * sizeof(uint32_t));
    uint32_t *ref_rankedVertices_inverse = (uint32_t *) my_malloc(ref_stats->num_vertices * sizeof(uint32_t));
    uint32_t *ref_rankedVertices_total = (uint32_t *) my_malloc(ref_stats->num_vertices * sizeof(uint32_t));
    uint32_t topK_array_size = 6;
    uint32_t topK_array[] = {30, 100, 300, 1000, 5000, 10000} ;
    struct PageRankCorrelationStats pageRankCorrelationStats_array[6];
    struct PageRankCorrelationStats pageRankCorrelationStats;
    struct PageRankCorrelationStats pageRankCorrelationStatsAvg;
    struct PageRankCorrelationStats
    pageRankCorrelationStatsSum;
    /* Zero all three accumulators (last value, running sum, average). */
    pageRankCorrelationStats.levenshtein_distance = 0;
    pageRankCorrelationStats.float_Kendall = 0.0f;
    pageRankCorrelationStats.real_Kendall = 0.0f;
    pageRankCorrelationStats.intersection = 0;
    pageRankCorrelationStats.mismatch = 0;
    pageRankCorrelationStats.avg_error_float = 0.0f;
    pageRankCorrelationStats.avg_error_relative = 0.0f;
    pageRankCorrelationStatsSum.levenshtein_distance = 0;
    pageRankCorrelationStatsSum.float_Kendall = 0.0f;
    pageRankCorrelationStatsSum.real_Kendall = 0.0f;
    pageRankCorrelationStatsSum.intersection = 0;
    pageRankCorrelationStatsSum.mismatch = 0;
    pageRankCorrelationStatsSum.avg_error_float = 0.0f;
    pageRankCorrelationStatsSum.avg_error_relative = 0.0f;
    pageRankCorrelationStatsAvg.levenshtein_distance = 0;
    pageRankCorrelationStatsAvg.float_Kendall = 0.0f;
    pageRankCorrelationStatsAvg.real_Kendall = 0.0f;
    pageRankCorrelationStatsAvg.intersection = 0;
    pageRankCorrelationStatsAvg.mismatch = 0;
    pageRankCorrelationStatsAvg.avg_error_float = 0.0f;
    pageRankCorrelationStatsAvg.avg_error_relative = 0.0f;
    /* Per-trial stats file: <fnameb>_<algo>_<ds>_<trial>_<pushpull>.stats,
     * opened in append mode. */
    char *fname_txt = (char *) malloc((strlen(arguments->fnameb) + 50) * sizeof(char));
    sprintf(fname_txt, "%s_%d_%d_%d_%d.%s", arguments->fnameb, arguments->algorithm, arguments->datastructure, trial, arguments->pushpull, "stats");
    FILE *fptr;
    fptr = fopen(fname_txt, "a+");
    topk = arguments->binSize;
    if(topk > ref_stats->num_vertices)
        topk = ref_stats->num_vertices;
    /* Build vertex -> rank inverse maps for both runs. */
    for(v = 0; v < stats->num_vertices; v++)
    {
        rankedVertices_inverse[stats->realRanks[v]] = v;
        ref_rankedVertices_inverse[ref_stats->realRanks[v]] = v;
        ref_rankedVertices_total[v] = ref_stats->realRanks[v];
    }
    /* Collect metrics for each preset top-k size that fits the graph. */
    for (x = 0; x < topK_array_size; ++x)
    {
        if(ref_stats->num_vertices < topK_array[x])
            break;
        pageRankCorrelationStats = collectStatsPageRank_topK(ref_stats, stats, ref_rankedVertices_total, ref_rankedVertices_inverse, rankedVertices_inverse, topK_array[x], ref_stats->num_vertices, fptr, 0);
        pageRankCorrelationStats_array[x] = pageRankCorrelationStats;
    }
    /* Tabulated per-top-k report: one column per top-k size, one row per
     * metric, printed to stdout and mirrored into the stats file. */
    if(arguments->verbosity > 0)
    {
        fprintf(stdout, "----------------------------------------------------------------------------------------------------------\n");
        fprintf(stdout, "Top K ");
        for (x = 0; x < topK_array_size; ++x)
        {
            if(ref_stats->num_vertices < topK_array[x])
                break;
            fprintf(stdout, "%-14u ", topK_array[x]);
        }
        fprintf(stdout, "\n");
        fprintf(stdout, "----------------------------------------------------------------------------------------------------------\n");
        fprintf(stdout, "levenshtein_distance ");
        for (x = 0; x < topK_array_size; ++x)
        {
            if(ref_stats->num_vertices < topK_array[x])
                break;
            fprintf(stdout, "%-14u ", pageRankCorrelationStats_array[x].levenshtein_distance);
        }
        fprintf(stdout, "\n");
        fprintf(stdout, "Rank float Kendall ");
        for (x = 0; x < topK_array_size; ++x)
        {
            if(ref_stats->num_vertices < topK_array[x])
                break;
            fprintf(stdout, "%-14lf ", pageRankCorrelationStats_array[x].float_Kendall);
        }
        fprintf(stdout, "\n");
        fprintf(stdout, "Rank real Kendall ");
        for (x = 0; x < topK_array_size; ++x)
        {
            if(ref_stats->num_vertices < topK_array[x])
                break;
            fprintf(stdout, "%-14lf ", pageRankCorrelationStats_array[x].real_Kendall);
        }
        fprintf(stdout, "\n");
        fprintf(stdout, "intersection ");
        for (x = 0; x < topK_array_size; ++x)
        {
            if(ref_stats->num_vertices < topK_array[x])
                break;
            fprintf(stdout, "%-14u ", pageRankCorrelationStats_array[x].intersection);
        }
        fprintf(stdout, "\n");
        fprintf(stdout, "mismatch ");
        for (x = 0; x < topK_array_size; ++x)
        {
            if(ref_stats->num_vertices < topK_array[x])
                break;
            fprintf(stdout, "%-14u ", pageRankCorrelationStats_array[x].mismatch);
        }
        fprintf(stdout, "\n");
        fprintf(stdout, "avg_error_float ");
        for (x = 0; x < topK_array_size; ++x)
        {
            if(ref_stats->num_vertices < topK_array[x])
                break;
            fprintf(stdout, "%-14lf ", pageRankCorrelationStats_array[x].avg_error_float);
        }
        fprintf(stdout, "\n");
        fprintf(stdout, "avg_error_relative ");
        for (x = 0; x < topK_array_size; ++x)
        {
            if(ref_stats->num_vertices < topK_array[x])
                break;
            fprintf(stdout, "%-14lf ", pageRankCorrelationStats_array[x].avg_error_relative);
        }
        fprintf(stdout, "\n");
        fprintf(stdout, "----------------------------------------------------------------------------------------------------------\n");
        fprintf(fptr, "----------------------------------------------------------------------------------------------------------\n");
        fprintf(fptr, "Top K ");
        for (x = 0; x < topK_array_size; ++x)
        {
            if(ref_stats->num_vertices < topK_array[x])
                break;
            fprintf(fptr, "%-14u ", topK_array[x]);
        }
        fprintf(fptr, "\n");
        fprintf(fptr, "----------------------------------------------------------------------------------------------------------\n");
        fprintf(fptr, "levenshtein_distance ");
        for (x = 0; x < topK_array_size; ++x)
        {
            if(ref_stats->num_vertices < topK_array[x])
                break;
            fprintf(fptr, "%-14u ", pageRankCorrelationStats_array[x].levenshtein_distance);
        }
        fprintf(fptr, "\n");
        fprintf(fptr, "Rank float Kendall ");
        for (x = 0; x < topK_array_size; ++x)
        {
            if(ref_stats->num_vertices < topK_array[x])
                break;
            fprintf(fptr, "%-14lf ", pageRankCorrelationStats_array[x].float_Kendall);
        }
        fprintf(fptr, "\n");
        fprintf(fptr, "Rank real Kendall ");
        for (x = 0; x < topK_array_size; ++x)
        {
            if(ref_stats->num_vertices < topK_array[x])
                break;
            fprintf(fptr, "%-14lf ", pageRankCorrelationStats_array[x].real_Kendall);
        }
        fprintf(fptr, "\n");
        fprintf(fptr, "intersection ");
        for (x = 0; x < topK_array_size; ++x)
        {
            if(ref_stats->num_vertices < topK_array[x])
                break;
            fprintf(fptr, "%-14u ", pageRankCorrelationStats_array[x].intersection);
        }
        fprintf(fptr, "\n");
        fprintf(fptr, "mismatch ");
        for (x = 0; x < topK_array_size; ++x)
        {
            if(ref_stats->num_vertices < topK_array[x])
                break;
            fprintf(fptr, "%-14u ", pageRankCorrelationStats_array[x].mismatch);
        }
        fprintf(fptr, "\n");
        fprintf(fptr, "avg_error_float ");
        for (x = 0; x < topK_array_size; ++x)
        {
            if(ref_stats->num_vertices < topK_array[x])
                break;
            fprintf(fptr, "%-14lf ", pageRankCorrelationStats_array[x].avg_error_float);
        }
        fprintf(fptr, "\n");
        fprintf(fptr, "avg_error_relative ");
        for (x = 0; x < topK_array_size; ++x)
        {
            if(ref_stats->num_vertices < topK_array[x])
                break;
            fprintf(fptr, "%-14lf ", pageRankCorrelationStats_array[x].avg_error_relative);
        }
        fprintf(fptr, "\n");
        fprintf(fptr, "----------------------------------------------------------------------------------------------------------\n");
    }
    /* Chunked pass: split the rank range into bins of chunk_x vertices and
     * accumulate per-chunk metrics into the Sum struct. */
    chunk_x = 1000;
    chunk_num = (ref_stats->num_vertices + chunk_x - 1) / chunk_x;
    if(arguments->verbosity > 1)
    {
        if(chunk_num == 1)
        {
            chunk_num = 1;
            chunk_x = ref_stats->num_vertices;
            pageRankCorrelationStats = collectStatsPageRank_topK(ref_stats, stats, ref_rankedVertices_total, ref_rankedVertices_inverse, rankedVertices_inverse, chunk_x, ref_stats->num_vertices, fptr, 1);
            pageRankCorrelationStatsSum.levenshtein_distance += pageRankCorrelationStats.levenshtein_distance;
            pageRankCorrelationStatsSum.float_Kendall += pageRankCorrelationStats.float_Kendall;
            pageRankCorrelationStatsSum.real_Kendall += pageRankCorrelationStats.real_Kendall;
            pageRankCorrelationStatsSum.intersection += pageRankCorrelationStats.intersection;
            pageRankCorrelationStatsSum.mismatch += pageRankCorrelationStats.mismatch;
            pageRankCorrelationStatsSum.avg_error_float += pageRankCorrelationStats.avg_error_float;
            pageRankCorrelationStatsSum.avg_error_relative += pageRankCorrelationStats.avg_error_relative;
        }
        else
        {
            for(x = 0; x < chunk_num; x++)
            {
                pageRankCorrelationStats = collectStatsPageRank_topK(ref_stats, stats, ref_rankedVertices_total, ref_rankedVertices_inverse, rankedVertices_inverse, chunk_x, (ref_stats->num_vertices - (chunk_x * x)), fptr, 0);
                pageRankCorrelationStatsSum.levenshtein_distance += pageRankCorrelationStats.levenshtein_distance;
                pageRankCorrelationStatsSum.float_Kendall += pageRankCorrelationStats.float_Kendall;
                pageRankCorrelationStatsSum.real_Kendall += pageRankCorrelationStats.real_Kendall;
                pageRankCorrelationStatsSum.intersection += pageRankCorrelationStats.intersection;
                pageRankCorrelationStatsSum.mismatch += pageRankCorrelationStats.mismatch;
                pageRankCorrelationStatsSum.avg_error_float += pageRankCorrelationStats.avg_error_float;
                pageRankCorrelationStatsSum.avg_error_relative += pageRankCorrelationStats.avg_error_relative;
            }
        }
    }
    /* Average the chunk sums (NOTE: uses initial chunk_num even when only
     * the verbosity > 1 branch fills the sums). */
    pageRankCorrelationStatsAvg.levenshtein_distance = pageRankCorrelationStatsSum.levenshtein_distance / chunk_num;
    pageRankCorrelationStatsAvg.float_Kendall = pageRankCorrelationStatsSum.float_Kendall / chunk_num;
    pageRankCorrelationStatsAvg.real_Kendall = pageRankCorrelationStatsSum.real_Kendall / chunk_num;
    pageRankCorrelationStatsAvg.intersection = pageRankCorrelationStatsSum.intersection / chunk_num;
    pageRankCorrelationStatsAvg.mismatch = pageRankCorrelationStatsSum.mismatch / chunk_num;
    pageRankCorrelationStatsAvg.avg_error_float = pageRankCorrelationStatsSum.avg_error_float / chunk_num;
    pageRankCorrelationStatsAvg.avg_error_relative = pageRankCorrelationStatsSum.avg_error_relative / chunk_num;
    /* Final verbose run for the configured top-k, then the summary report. */
    pageRankCorrelationStats = collectStatsPageRank_topK(ref_stats, stats, ref_rankedVertices_total, ref_rankedVertices_inverse, rankedVertices_inverse, topk, ref_stats->num_vertices, fptr, 1);
    fprintf(stdout, "-----------------------------------------------------\n");
    fprintf(stdout, "Avg (Sum(bin)*n)/n: (Sum(%u)*%u)/%u \n", chunk_x, chunk_num, chunk_num);
    fprintf(stdout, "-----------------------------------------------------\n");
    fprintf(stdout, "levenshtein_distance: %u \n", pageRankCorrelationStatsAvg.levenshtein_distance);
    fprintf(stdout, "Rank float Kendall: %lf\n", pageRankCorrelationStatsAvg.float_Kendall);
    fprintf(stdout, "Rank real Kendall: %lf\n", pageRankCorrelationStatsAvg.real_Kendall);
    fprintf(stdout, "intersection: %u \n", pageRankCorrelationStatsAvg.intersection);
    fprintf(stdout, "mismatch: %u \n", pageRankCorrelationStatsAvg.mismatch);
    fprintf(stdout, "avg_error_float: %lf\n", pageRankCorrelationStatsAvg.avg_error_float);
    fprintf(stdout, "avg_error_relative: %lf\n", pageRankCorrelationStatsAvg.avg_error_relative);
    fprintf(stdout, "-----------------------------------------------------\n");
    fprintf(stdout, "-----------------------------------------------------\n");
    fprintf(stdout, "numThreads: %u \n", arguments->pre_numThreads);
    fprintf(stdout, "Time (S): %lf\n", stats->time_total);
    fprintf(stdout, "Iterations: %u \n", stats->iterations);
    fprintf(stdout, "-----------------------------------------------------\n");
    if(arguments->verbosity > 0)
    {
        fprintf(fptr, "-----------------------------------------------------\n");
        fprintf(fptr, "Avg (Sum_n(bins))/n: (Sum_%u(%u)/%u \n", chunk_num, chunk_x, chunk_num);
        fprintf(fptr, "-----------------------------------------------------\n");
        fprintf(fptr, "levenshtein_distance: %u \n", pageRankCorrelationStatsAvg.levenshtein_distance);
        fprintf(fptr, "Rank float Kendall: %lf\n", pageRankCorrelationStatsAvg.float_Kendall);
        fprintf(fptr, "Rank real Kendall: %lf\n", pageRankCorrelationStatsAvg.real_Kendall);
        fprintf(fptr, "intersection: %u \n", pageRankCorrelationStatsAvg.intersection);
        fprintf(fptr, "mismatch: %u \n", pageRankCorrelationStatsAvg.mismatch);
        fprintf(fptr, "avg_error_float: %lf\n", pageRankCorrelationStatsAvg.avg_error_float);
        fprintf(fptr, "avg_error_relative: %lf\n", pageRankCorrelationStatsAvg.avg_error_relative);
        fprintf(fptr, "-----------------------------------------------------\n");
        fprintf(fptr, "-----------------------------------------------------\n");
        fprintf(fptr, "numThreads: %u \n", arguments->pre_numThreads);
        fprintf(fptr, "Time (S): %lf\n", stats->time_total);
        fprintf(fptr, "Iterations: %u \n", stats->iterations);
        fprintf(fptr, "-----------------------------------------------------\n");
        /* Full side-by-side top-k rank table (reference vs compared run). */
        if(arguments->verbosity > 2)
        {
            fprintf(fptr, " ----------------------------------------------------- ");
            fprintf(fptr, " -----------------------------------------------------\n");
            fprintf(fptr, "| %-14s | %-14s | %-17s | ", "Ref Rank", "Vertex", "PageRank");
            fprintf(fptr, "| %-14s | %-14s | %-17s | \n", "Rank", "Vertex", "PageRank");
            fprintf(fptr, " ----------------------------------------------------- ");
            fprintf(fptr, " -----------------------------------------------------\n");
            for(v = (ref_stats->num_vertices - topk); v < ref_stats->num_vertices; v++)
            {
                // fprintf(fptr,"rank %u vertex %u pr %.22f \n", v, ref_stats->realRanks[v], ref_stats->pageRanks[ref_stats->realRanks[v]]);
                fprintf(fptr, "| %-14u | %-14u | %-10.15lf | ", v, ref_stats->realRanks[v], ref_stats->pageRanks[ref_stats->realRanks[v]]);
                fprintf(fptr, "| %-14u | %-14u | %-10.15lf | \n", v, stats->realRanks[v], stats->pageRanks[stats->realRanks[v]]);
            }
            fprintf(fptr, " ----------------------------------------------------- ");
            fprintf(fptr, " -----------------------------------------------------\n");
        }
    }
    fclose(fptr);
    free(fname_txt);
    free(ref_rankedVertices_total);
    free(rankedVertices_inverse);
    free(ref_rankedVertices_inverse);
}

// void collectStats(struct Arguments *arguments)
// {
//     struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
// // printf("Filename : %s \n",fnameb);
//     printf(" *****************************************************\n");
//     printf(" -----------------------------------------------------\n");
//     printf("| %-51s | \n", "Collect Stats Process");
//     printf(" -----------------------------------------------------\n");
//     Start(timer);
//     struct GraphCSR *graphStats = graphCSRPreProcessingStep (arguments);
//     uint32_t *histogram_in = (uint32_t *) my_malloc(sizeof(uint32_t) * arguments->binSize);
//     uint32_t *histogram_out = (uint32_t *) my_malloc(sizeof(uint32_t) * arguments->binSize);
//     uint32_t i = 0;
//     #pragma omp parallel for
//     for(i = 0 ; i < arguments->binSize; i++)
//     {
//         histogram_in[i] = 0;
//         histogram_out[i] = 0;
//     }
//     char *fname_txt = (char *) malloc((strlen(arguments->fnameb) + 20) * sizeof(char));
//     char *fname_stats_out = (char *) malloc((strlen(arguments->fnameb) + 20) * sizeof(char));
//     char *fname_stats_in = (char *) malloc((strlen(arguments->fnameb) + 20) * sizeof(char));
//     char
*fname_adjMat = (char *) malloc((strlen(arguments->fnameb) + 20) * sizeof(char)); // fname_txt = strcpy (fname_txt, arguments->fnameb); // fname_adjMat = strcpy (fname_adjMat, arguments->fnameb); // fname_adjMat = strcat (fname_adjMat, ".bin-adj-SM.dat");// out-degree // if(arguments->lmode == 1) // { // fname_stats_in = strcat (fname_txt, ".in-degree.dat");// in-degree // countHistogram(graphStats, histogram_in, arguments->binSize, arguments->inout_degree); // printHistogram(fname_stats_in, histogram_in, arguments->binSize); // } // else if(arguments->lmode == 2) // { // fname_stats_out = strcat (fname_txt, ".out-degree.dat");// out-degree // countHistogram(graphStats, histogram_out, arguments->binSize, arguments->inout_degree); // printHistogram(fname_stats_out, histogram_out, arguments->binSize); // } // printSparseMatrixList(fname_adjMat, graphStats, arguments->binSize); // Stop(timer); // printf(" -----------------------------------------------------\n"); // printf("| %-51s | \n", "Collect Stats Complete"); // printf(" -----------------------------------------------------\n"); // printf("| %-51f | \n", Seconds(timer)); // printf(" -----------------------------------------------------\n"); // printf(" *****************************************************\n"); // free(timer); // graphCSRFree(graphStats); // free(histogram_in); // free(histogram_out); // free(fname_txt); // free(fname_stats_out); // free(fname_stats_in); // free(fname_adjMat); // } // void countHistogram(struct GraphCSR *graphStats, uint32_t *histogram, uint32_t binSize, uint32_t inout_degree) // { // uint32_t v; // uint32_t index; // #pragma omp parallel for // for(v = 0; v < graphStats->num_vertices; v++) // { // index = v / ((graphStats->num_vertices / binSize) + 1); // if(inout_degree == 1) // { // #pragma omp atomic update // histogram[index] += graphStats->vertices->in_degree[v]; // } // else if(inout_degree == 2) // { // #pragma omp atomic update // histogram[index] += 
graphStats->vertices->out_degree[v]; // } // } // } // void printHistogram(const char *fname_stats, uint32_t *histogram, uint32_t binSize) // { // uint32_t index; // FILE *fptr; // fptr = fopen(fname_stats, "w"); // for(index = 0; index < binSize; index++) // { // fprintf(fptr, "%u %u \n", index, histogram[index]); // } // fclose(fptr); // } // void printSparseMatrixList(const char *fname_stats, struct GraphCSR *graphStats, uint32_t binSize) // { // uint32_t *SparseMatrix = (uint32_t *) my_malloc(sizeof(uint32_t) * binSize * binSize); // uint32_t x; // uint32_t y; // #pragma omp parallel for private(y) shared(SparseMatrix) // for(x = 0; x < binSize; x++) // { // for(y = 0; y < binSize; y++) // { // SparseMatrix[(binSize * y) + x] = 0; // } // } // uint32_t i; // #pragma omp parallel for // for(i = 0; i < graphStats->num_edges; i++) // { // uint32_t src; // uint32_t dest; // src = graphStats->sorted_edges_array->edges_array_src[i] / ((graphStats->num_vertices / binSize) + 1); // dest = graphStats->sorted_edges_array->edges_array_dest[i] / ((graphStats->num_vertices / binSize) + 1); // #pragma omp atomic update // SparseMatrix[(binSize * dest) + src]++; // } // FILE *fptr; // fptr = fopen(fname_stats, "w"); // for(x = 0; x < binSize; x++) // { // for(y = 0; y < binSize; y++) // { // fprintf(fptr, "%u %u %u\n", x, y, SparseMatrix[(binSize * y) + x]); // } // } // fclose(fptr); // free(SparseMatrix); // }
templatemath.h
/* * templatemath.h * * Created on: Jan 1, 2016 * Author: agibsonccc */ #ifndef TEMPLATEMATH_H_ #define TEMPLATEMATH_H_ #include <math.h> #include <cmath> #include <dll.h> #include <pointercast.h> #define HALF_MAX_VALUE 65504. #define FLOAT_MAX_VALUE 3.4028235E38 #define DOUBLE_MAX_VALUE 1.7976931348623157E308 #define FLOAT_MIN_NORMAL 1.17549435e-38 #ifndef M_E #define M_E 2.718281828459 #endif #ifdef __CUDACC__ #include <types/float16.h> #define math_def __host__ __device__ #ifdef CUDA_9 struct HALFS{ half H; half L; __host__ __device__ HALFS() {}; __host__ __device__ ~HALFS() {}; }; union PAIR { HALFS B; int W; __host__ __device__ PAIR() {}; __host__ __device__ ~PAIR(){} }; #else typedef union { struct { half H; half L; } B; int W; } PAIR; #endif // cuda_9 #else #define math_def #include <types/float16.h> #endif namespace nd4j { #ifdef __CUDACC__ #endif namespace math { template<typename T> math_def inline T nd4j_abs(T value); template<typename T> math_def inline void nd4j_swap(T &val1, T &val2); template<typename T> math_def inline T nd4j_max(T val1, T val2); template<typename T> math_def inline T nd4j_min(T val1, T val2); template<typename T> math_def inline T nd4j_re(T val1, T val2); template<typename T> math_def inline T nd4j_rint(T val1); template<typename T> math_def inline T nd4j_copysign(T val1, T val2); //#ifndef __CUDACC__ template<typename T> math_def inline T nd4j_dot(T *x, T *y, int length); //#endif template<typename T> math_def inline T nd4j_ceil(T val1); template<typename T> math_def inline bool nd4j_isnan(T val1); template<typename T> math_def inline bool nd4j_isinf(T val1); template<typename T> math_def inline bool nd4j_isfin(T val1); template<typename T> math_def inline T nd4j_cos(T val); template<typename T> math_def inline T nd4j_cosh(T val); template<typename T> math_def inline T nd4j_exp(T val); template<typename T> math_def inline T nd4j_floor(T val); template<typename T> math_def inline T nd4j_log(T val); template<typename T> math_def 
inline T nd4j_pow(T val, T val2); template<typename T> math_def inline T nd4j_round(T val); template<typename T> math_def inline T nd4j_remainder(T num, T denom); template<typename T> math_def inline T nd4j_fmod(T num, T denom); template<typename T> math_def inline T nd4j_erf(T num); template<typename T> math_def inline T nd4j_erfc(T num); template<typename T> math_def inline T nd4j_sigmoid(T val) { return (T) 1.0 / ((T) 1.0 + nd4j_exp<T>(-val)); } template<typename T> math_def inline T nd4j_elu(T val) { if (val >= (T) 0.0) return val; else return nd4j_exp<T>(val) - (T) 1.0; //return val >= 0.0 ? val : (nd4j_exp<T>(val) - 1.0); } template<typename T> math_def inline T nd4j_leakyrelu(T val,T alpha) { if (val < (T) 0.0f) return alpha * val; else return val; //return val < 0 ? alpha * val : val; } template<typename T> math_def inline T nd4j_eluderivative(T val) { if (val >= (T) 0.0f) return (T) 1.0f; else return nd4j_exp<T>(val); //return val >= 0.0 ? 1.0 : nd4j_exp(val); } template<typename T> math_def inline T nd4j_sin(T val); template<typename T> math_def inline T nd4j_sinh(T val); template<typename T> math_def inline T softplus(T val) { return nd4j_log<T>((T) 1.0f + nd4j_exp<T>(val)); } template<typename T> math_def inline T nd4j_softsign(T val) { return val / ((T) 1.0f + nd4j::math::nd4j_abs<T>(val)); } template<typename T> math_def inline T nd4j_sqrt(T val); template<typename T> math_def inline T nd4j_tanh(T val); template<typename T> math_def inline T nd4j_tan(T val); template<typename T> math_def inline T nd4j_atan2(T val1, T val2); template<> math_def inline float16 nd4j_atan2<float16>(float16 value1, float16 value2) { return (float16) atan2f((float) value1, (float) value2); } template<> math_def inline float nd4j_atan2<float>(float value1, float value2) { return atan2f(value1, value2); } template<> math_def inline double nd4j_atan2<double>(double value1, double value2) { return atan2(value1, value2); } template<typename T> math_def inline T nd4j_tan(T val) { 
return nd4j_log((val + 1 / (1 - val)) * 0.5); } template<typename T> math_def inline T nd4j_tanhderivative(T val) { T tanh = nd4j_tanh(val); return (T) 1.0f - tanh * tanh; } template<typename T> math_def inline T nd4j_sigmoidderivative(T val) { T sigmoid = nd4j_sigmoid(val); T out = sigmoid * ((T) 1.0f - sigmoid); return out; } template<typename T> math_def inline T nd4j_softsignderivative(T val) { T y = (T) 1.0f + nd4j_abs(val); return (T) 1.0f / (y * y); } template<typename T> math_def inline T nd4j_sgn(T val) { return val < (T) 0.0f ? (T) -1.0f : val > (T) 0.0f ? (T) 1.0f : (T) 0.0f; } template<typename T> math_def inline T nd4j_sign(T val) { return nd4j_sgn<T>(val); } template<typename T> math_def inline T nd4j_signum(T val) { return nd4j_sgn<T>(val); } //#ifndef __CUDACC__ template<> math_def inline float16 nd4j_dot<float16>(float16 *x, float16 *y, int length) { float16 dot = (float16) 0.0f; // TODO: since we can't use simd on unions, we might use something else here. for(int e = 0; e < length; e++) { dot += x[e] * y[e]; } return dot; } template<typename T> math_def inline T nd4j_dot(T *x, T *y, int length) { T dot = (T) 0.0f; #pragma omp simd reduction(+:dot) for(int e = 0; e < length; e++) { dot += x[e] * y[e]; } return dot; } //#endif template<typename T> math_def inline T nd4j_acos(T val); template<typename T> math_def inline T nd4j_acosh(T val); template<typename T> math_def inline T nd4j_asin(T val); template<typename T> math_def inline T nd4j_asinh(T val); template<typename T> math_def inline T nd4j_asinh(T val) { //Math.log(Math.sqrt(Math.pow(x, 2) + 1) + x) return nd4j_log(nd4j_sqrt(nd4j_pow(val, (T) 2) + (T) 1) + val); } template<typename T> math_def inline T nd4j_atan(T val); template<typename T> math_def inline T nd4j_atanh(T val); template<> math_def inline float16 nd4j_abs<float16>(float16 value) { #ifdef NATIVE_HALFS if (value < (float16) 0.f) { return float16(__hneg(value.data)); } else return value; #else return (float16) fabsf((float) value); 
#endif } template<> math_def inline float nd4j_abs<float>(float value) { return fabsf(value); } template<> math_def inline double nd4j_abs<double>(double value) { return value < 0 ? -value : value; } template<> math_def inline int nd4j_abs<int>(int value) { return value < 0 ? -value : value; } template<> math_def inline Nd4jLong nd4j_abs<Nd4jLong>(Nd4jLong value) { return value < 0 ? -value : value; } template<> math_def inline float16 nd4j_rint<float16>(float16 value) { return (float16) rintf((float) value); } template<> math_def inline float nd4j_rint<float>(float value) { return rintf(value); } template<> math_def inline double nd4j_rint<double>(double value) { return rint(value); } template<> math_def inline int nd4j_rint<int>(int value) { return value; } template<> math_def inline Nd4jLong nd4j_rint<Nd4jLong>(Nd4jLong value) { return value; } template<> math_def inline bool nd4j_isnan<float16>(float16 value) { return *(value.data.getXP()) == 0x7fffU; } template<> math_def inline bool nd4j_isnan<float>(float value) { return value != value; } template<> math_def inline bool nd4j_isnan<double>(double value) { return value != value; } template<> math_def inline bool nd4j_isnan<int>(int value) { return false; } template<> math_def inline bool nd4j_isnan<Nd4jLong>(Nd4jLong value) { return false; } template<> math_def inline bool nd4j_isinf<float16>(float16 value) { return value < (float16) -HALF_MAX_VALUE || value > (float16) HALF_MAX_VALUE; } template<> math_def inline bool nd4j_isinf<float>(float value) { #ifdef __CUDACC__ return isinf(value); #else return std::isinf(value); #endif //return value < -FLOAT_MAX_VALUE || value > FLOAT_MAX_VALUE; } template<> math_def inline bool nd4j_isinf<double>(double value) { #ifdef __CUDACC__ return isinf(value); #else return std::isinf(value); #endif //return value < -DOUBLE_MAX_VALUE || value > DOUBLE_MAX_VALUE; } template<> math_def inline bool nd4j_isinf<int>(int value) { return false; } template<> math_def inline bool 
nd4j_isinf<Nd4jLong>(Nd4jLong value) { return false; } template<typename T> math_def inline bool nd4j_isfin(T value) { return !nd4j_isnan<T>(value) && !nd4j_isinf<T>(value); } template<> math_def inline float16 nd4j_copysign<float16>(float16 val1, float16 val2) { return (float16) copysignf((float) val1, (float) val2); } template<> math_def inline float nd4j_copysign<float>(float val1, float val2) { return copysignf(val1, val2); } template<> math_def inline double nd4j_copysign<double>(double val1, double val2) { return copysign(val1, val2); } template<> math_def inline int nd4j_copysign<int>(int val1, int val2) { if (val2 < 0) return -(nd4j_abs<int>(val1)); else return nd4j_abs<int>(val1); } template<> math_def inline Nd4jLong nd4j_copysign<Nd4jLong>(Nd4jLong val1, Nd4jLong val2) { if (val2 < 0) return -(nd4j_abs<Nd4jLong>(val1)); else return nd4j_abs<Nd4jLong>(val1); } template<> math_def inline float16 nd4j_max<float16>(float16 val1, float16 val2) { return val1 > val2 ? val1 : val2; } template<> math_def inline float nd4j_max<float>(float val1, float val2) { return val1 > val2 ? val1 : val2; } template<> math_def inline double nd4j_max<double>(double val1, double val2) { return val1 > val2 ? val1 : val2; } template<> math_def inline int nd4j_max<int>(int val1, int val2) { return val1 > val2 ? val1 : val2; } template<> math_def inline Nd4jLong nd4j_max<Nd4jLong>(Nd4jLong val1, Nd4jLong val2) { return val1 > val2 ? val1 : val2; } template<> math_def inline Nd4jLong nd4j_min<Nd4jLong>(Nd4jLong val1, Nd4jLong val2) { return val1 < val2 ? val1 : val2; } template<> math_def inline float16 nd4j_min<float16>(float16 val1, float16 val2) { return val1 < val2 ? val1 : val2; } template<> math_def inline float nd4j_min<float>(float val1, float val2) { return val1 < val2 ? val1 : val2; } template<> math_def inline double nd4j_min<double>(double val1, double val2) { return val1 < val2 ? 
val1 : val2; } template<> math_def inline int nd4j_min<int>(int val1, int val2) { return val1 < val2 ? val1 : val2; } template<> math_def inline float16 nd4j_ceil<float16>(float16 val) { #ifdef NATIVE_HALFS return hceil(val.data); #else return ceilf((float) val); #endif } template<> math_def inline float nd4j_ceil<float>(float val1) { return ceilf(val1); } template<> math_def inline double nd4j_ceil<double>(double val) { return ceil(val); } template<> math_def inline int nd4j_ceil<int>(int val) { return ceil((float) val); } template<> math_def inline float16 nd4j_cos<float16>(float16 val) { #ifdef NATIVE_HALFS return hcos(val.data); #else return cosf((float) val); #endif } template<> math_def inline float nd4j_cos<float>(float val) { return cosf(val); } template<> math_def inline double nd4j_cos<double>(double val) { return cos(val); } template<> math_def inline int nd4j_cos<int>(int val) { return cosf((float) val); } template<> math_def inline float16 nd4j_cosh<float16>(float16 val) { return coshf((float) val); } template<> math_def inline float nd4j_cosh<float>(float val) { return coshf(val); } template<> math_def inline double nd4j_cosh<double>(double val) { return cosh(val); } template<> math_def inline int nd4j_cosh<int>(int val) { return coshf((float) val); } template<> math_def inline float16 nd4j_exp<float16>(float16 val) { #ifdef NATIVE_HALFS return hexp(val.data); #else return (float16) expf((float) val); #endif } template<> math_def inline float nd4j_exp<float>(float val) { return expf(val); } template<> math_def inline double nd4j_exp<double>(double val) { return exp(val); } template<> math_def inline int nd4j_exp<int>(int val) { return expf((float) val); } template<> math_def inline float16 nd4j_floor<float16>(float16 val) { #ifdef NATIVE_HALFS return hfloor(val.data); #else return (float16) floorf((float) val); #endif } template<> math_def inline float nd4j_floor<float>(float val) { return floorf(val); } template<> math_def inline double 
nd4j_floor<double>(double val) { return floor(val); } template<> math_def inline int nd4j_floor<int>(int val) { return floorf((float) val); } template<> math_def inline float16 nd4j_log<float16>(float16 val) { #ifdef NATIVE_HALFS return hlog(val.data); #else return (float16) logf((float) val); #endif } template<> math_def inline float nd4j_log<float>(float val) { return logf(val); } template<> math_def inline double nd4j_log<double>(double val) { return log(val); } template<> math_def inline int nd4j_log<int>(int val) { return logf((int) val); } template<> math_def inline float16 nd4j_pow<float16>(float16 val, float16 val2) { return (float16) powf((float) val, (float) val2); } template<> math_def inline float nd4j_pow<float>(float val, float val2) { return powf(val, val2); } template<> math_def inline double nd4j_pow<double>(double val, double val2) { return pow(val, val2); } template<> math_def inline int nd4j_pow<int>(int val, int val2) { return powf((float) val, (float) val2); } template<typename T> math_def inline T nd4j_re(T val1, T val2) { if (val1 == (T) 0.0f && val2 == (T) 0.0f) return (T) 0.0f; return nd4j_abs<T>(val1 - val2) / (nd4j_abs<T>(val1) + nd4j_abs<T>(val2)); } template<> math_def inline float16 nd4j_round<float16>(float16 val) { return (float16) roundf((float) val); } template<> math_def inline float nd4j_round<float>(float val) { return roundf(val); } template<> math_def inline float nd4j_remainder<float>(float num, float denom) { return remainderf(num, denom); } template<> math_def inline double nd4j_remainder<double>(double num, double denom) { return remainder(num, denom); } template<> math_def inline float16 nd4j_remainder<float16>(float16 num, float16 denom) { return (float16) remainderf((float) num, (float) denom); } template<> math_def inline float nd4j_fmod<float>(float num, float denom) { return fmodf(num, denom); } template<> math_def inline double nd4j_fmod<double>(double num, double denom) { return fmod(num, denom); } template<> 
math_def inline float16 nd4j_fmod<float16>(float16 num, float16 denom) { return (float16) fmodf((float) num, (float) denom); } template<> math_def inline float nd4j_erf<float>(float num) { return erff(num); } template<> math_def inline double nd4j_erf<double>(double num) { return erf(num); } template<> math_def inline float16 nd4j_erf<float16>(float16 num) { return (float16) erff((float) num); } template<> math_def inline float nd4j_erfc<float>(float num) { return erfcf(num); } template<> math_def inline double nd4j_erfc<double>(double num) { return erfc(num); } template<> math_def inline float16 nd4j_erfc<float16>(float16 num) { return (float16) erfcf((float) num); } template<> math_def inline double nd4j_round<double>(double val) { return round(val); } template<> math_def inline int nd4j_round<int>(int val) { return round((float) val); } template<> math_def inline float16 nd4j_sin<float16>(float16 val) { #ifdef NATIVE_HALFS return hsin(val.data); #else return (float16) sinf((float) val); #endif } template<> math_def inline float nd4j_sin<float>(float val) { return sinf(val); } template<> math_def inline double nd4j_sin<double>(double val) { return sin(val); } template<> math_def inline int nd4j_sin<int>(int val) { return sin((float) val); } template<> math_def inline float16 nd4j_sinh<float16>(float16 val) { #ifdef NATIVE_HALFS return hsin(val.data); #else return (float16) sinh((float) val); #endif } template<> math_def inline float nd4j_sinh<float>(float val) { return sinhf(val); } template<> math_def inline double nd4j_sinh<double>(double val) { return sinh(val); } template<> math_def inline int nd4j_sinh<int>(int val) { return sinhf((float) val); } template<> math_def inline float16 nd4j_sqrt<float16>(float16 val) { #ifdef NATIVE_HALFS return hsqrt(val.data); #else return (float16) sqrtf((float) val); #endif } template<> math_def inline float nd4j_sqrt<float>(float val) { return sqrtf(val); } template<> math_def inline double nd4j_sqrt<double>(double val) { 
return sqrt(val); } template<> math_def inline int nd4j_sqrt<int>(int val) { return sqrtf((float) val); } template<> math_def inline float16 nd4j_tanh<float16>(float16 val) { return (float16) tanhf((float) val); } template<> math_def inline float nd4j_tanh<float>(float val) { return tanhf(val); } template<> math_def inline double nd4j_tanh<double>(double val) { return tanh(val); } template<> math_def inline int nd4j_tanh<int>(int val) { return tanhf((float) val); } template<> math_def inline float16 nd4j_tan<float16>(float16 val) { return (float16) tanf((float) val); } template<> math_def inline float nd4j_tan<float>(float val) { return tanf(val); } template<> math_def inline double nd4j_tan<double>(double val) { return tan(val); } template<> math_def inline int nd4j_tan<int>(int val) { return tanf((float) val); } template<> math_def inline float16 nd4j_acos<float16>(float16 val) { return (float16) acosf((float) val); } template<> math_def inline float nd4j_acos<float>(float val) { return acosf(val); } template<> math_def inline double nd4j_acos<double>(double val) { return acos(val); } template<> math_def inline int nd4j_acos<int>(int val) { return acosf((float) val); } template<> math_def inline float16 nd4j_acosh<float16>(float16 val) { return (float16) acoshf((float) val); } template<> math_def inline float nd4j_acosh<float>(float val) { return acoshf(val); } template<> math_def inline double nd4j_acosh<double>(double val) { return acosh(val); } template<> math_def inline int nd4j_acosh<int>(int val) { return acoshf((float) val); } template<> math_def inline float16 nd4j_asin<float16>(float16 val) { return (float16) asinf((float) val); } template<> math_def inline float nd4j_asin<float>(float val) { return asinf(val); } template<> math_def inline double nd4j_asin<double>(double val) { return asin(val); } template<> math_def inline int nd4j_asin<int>(int val) { return asinf((float) val); } template<> math_def inline float16 nd4j_atan<float16>(float16 val) { 
return (float16) atanf((float)val); } template<> math_def inline float nd4j_atan<float>(float val) { return atanf(val); } template<> math_def inline double nd4j_atan<double>(double val) { return atan(val); } template<> math_def inline int nd4j_atan<int>(int val) { return atanf((float) val); } template<> math_def inline float16 nd4j_atanh<float16>(float16 val) { return (float16) atanhf((float)val); } template<> math_def inline float nd4j_atanh<float>(float val) { return atanhf(val); } template<> math_def inline double nd4j_atanh<double>(double val) { return atanh(val); } template<> math_def inline int nd4j_atanh<int>(int val) { return atanhf((float) val); } template<typename T> math_def inline void nd4j_swap(T &val1, T &val2) { T temp = val1; val1=val2; val2=temp; }; #ifdef __CUDACC__ namespace atomics { template <typename T> inline __device__ T nd4j_atomicAdd(T* address, T val); template <typename T> inline __device__ T nd4j_atomicSub(T* address, T val); template <typename T> inline __device__ T nd4j_atomicMul(T* address, T val); template <typename T> inline __device__ T nd4j_atomicDiv(T* address, T val); template <> inline __device__ double nd4j_atomicAdd<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int *) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ float16 nd4j_atomicAdd<float16>(float16* address, float16 val) { int* address_as_ull = (int*) address; long addr = (long) address; bool misaligned = addr & 0x3; if (misaligned) address_as_ull = (int *) (addr - 2); PAIR old, assumed, fresh; old.W = *address_as_ull; do { if (!misaligned) { float16 res = ((float16) old.B.H) + val; fresh.B.H = res.data; fresh.B.L = old.B.L; } else { float16 res = ((float16) old.B.L) + val; fresh.B.L = 
res.data; fresh.B.H = old.B.H; } assumed.W = old.W; old.W = atomicCAS(address_as_ull, assumed.W, fresh.W); } while (assumed.W != old.W); if (!misaligned) return old.B.H; else return old.B.L; } template <> inline __device__ double nd4j_atomicSub<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int *) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val - __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ double nd4j_atomicMul<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val * __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ double nd4j_atomicDiv<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val / __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ float nd4j_atomicAdd<float>(float* address, float val) { return atomicAdd(address,val); } template <> inline __device__ float nd4j_atomicSub<float>(float* address, float val) { int* address_as_ull = (int*) address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val - __float_as_int(assumed))); } while (assumed != old); return __int_as_float(old); } template <> inline __device__ float nd4j_atomicMul<float>(float* address, float val) { int* address_as_ull = ( int*)address; int old = 
*address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val * __float_as_int(assumed))); } while (assumed != old); return __int_as_float(old); } template <> inline __device__ float nd4j_atomicDiv<float>(float* address, float val) { int* address_as_ull = (int*)address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val * __float_as_int(assumed))); } while (assumed != old); return __int_as_float(old); } } #endif } } #endif /* TEMPLATEMATH_H_ */
adjvectorbqm.h
// Copyright 2020 D-Wave Systems Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef DIMOD_ADJVECTORBQM_H_ #define DIMOD_ADJVECTORBQM_H_ #include <stdio.h> #include <algorithm> #include <utility> #include <vector> #include "dimod/utils.h" namespace dimod { template <class V, class B> class AdjVectorBQM { public: using bias_type = B; using variable_type = V; using size_type = std::size_t; using outvars_iterator = typename std::vector<std::pair<V, B>>::iterator; using const_outvars_iterator = typename std::vector<std::pair<V, B>>::const_iterator; // in the future we'd probably like to make this protected std::vector<std::pair<std::vector<std::pair<V, B>>, B>> adj; AdjVectorBQM() {} template <class BQM> explicit AdjVectorBQM(const BQM &bqm) { adj.resize(bqm.num_variables()); for (variable_type v = 0; v < bqm.num_variables(); ++v) { linear(v) = bqm.linear(v); auto span = bqm.neighborhood(v); adj[v].first.insert(adj[v].first.begin(), span.first, span.second); } } /** * Construct a BQM from a dense array. * * @param dense An array containing the biases. Assumed to contain * `num_variables`^2 elements. The upper and lower triangle are summed. * @param num_variables The number of variables. 
*/ template <class B2> AdjVectorBQM(const B2 dense[], size_type num_variables, bool ignore_diagonal = false) { // we know how big our linear is going to be adj.resize(num_variables); bias_type qbias; if (!ignore_diagonal) { for (size_type v = 0; v < num_variables; ++v) { adj[v].second = dense[v * (num_variables + 1)]; } } for (size_type u = 0; u < num_variables; ++u) { for (size_type v = u + 1; v < num_variables; ++v) { qbias = dense[u * num_variables + v] + dense[v * num_variables + u]; if (qbias != 0) { adj[u].first.emplace_back(v, qbias); adj[v].first.emplace_back(u, qbias); } } } } /** * Construct a BQM from a dense array. This constructor is parallelized * and temporarily zeroes out the diagonal of the dense array but restores * it back. * * @param dense An array containing the biases. Assumed to contain * `num_variables`^2 elements. The upper and lower triangle are summed. * @param num_variables The number of variables. */ template <class B2> AdjVectorBQM(B2 dense[], size_type num_variables, bool ignore_diagonal = false) { // we know how big our linear is going to be adj.resize(num_variables); // Backup copy of the diagonal of the dense matrix. std::vector<B2> dense_diagonal(num_variables); if (!ignore_diagonal) { #pragma omp parallel for for (size_type v = 0; v < num_variables; ++v) { adj[v].second = dense[v * (num_variables + 1)]; } } #pragma omp parallel { // Zero out the diagonal to avoid expensive checks inside innermost // loop in the code for reading the matrix. The diagonal will be // restored so a backup copy is saved. 
#pragma omp for schedule(static) for (size_type v = 0; v < num_variables; ++v) { dense_diagonal[v] = dense[v * (num_variables + 1)]; dense[v * (num_variables + 1)] = 0; } size_type counters[BLOCK_SIZE] = {0}; size_type buffer_size = num_variables * BLOCK_SIZE * sizeof(std::pair<variable_type, bias_type>); std::pair<variable_type, bias_type> *temp_buffer = (std::pair<variable_type, bias_type> *)malloc(buffer_size); if (temp_buffer == NULL) { printf("Memory allocation failure.\n"); exit(0); } // We process the matrix in blocks of size BLOCK_SIZE*BLOCK_SIZE to take // advantage of cache locality. Dynamic scheduling is used as we know some // blocks may be more sparse than others and processing them may finish earlier. #pragma omp for schedule(dynamic) for (size_type u_st = 0; u_st < num_variables; u_st += BLOCK_SIZE) { size_type u_end = std::min(u_st + BLOCK_SIZE, num_variables); for (size_type v_st = 0; v_st < num_variables; v_st += BLOCK_SIZE) { size_type v_end = std::min(v_st + BLOCK_SIZE, num_variables); for (size_type u = u_st, n = 0; u < u_end; u++, n++) { size_type counter_u = counters[n]; size_type counter_u_old = counter_u; for (size_type v = v_st; v < v_end; v++) { bias_type qbias = dense[u * num_variables + v] + dense[v * num_variables + u]; if (qbias != 0) { temp_buffer[n * num_variables + counter_u++] = { v, qbias}; } } if (counter_u != counter_u_old) { counters[n] = counter_u; } } } for (size_type n = 0; n < BLOCK_SIZE; n++) { if (counters[n]) { adj[u_st + n].first.assign( temp_buffer + n * num_variables, temp_buffer + n * num_variables + counters[n]); counters[n] = 0; } } } free(temp_buffer); // Restore the diagonal of the original dense matrix #pragma omp for schedule(static) for (size_type v = 0; v < num_variables; ++v) { dense[v * (num_variables + 1)] = dense_diagonal[v]; } } } /** * Construct a BQM from COO-formated iterators. * * A sparse BQM encoded in [COOrdinate] format is specified by three * arrays of (row, column, value). 
* * [COOrdinate]: https://w.wiki/n$L * * @param row_iterator Iterator pointing to the beginning of the row data. * Must be a random access iterator. * @param col_iterator Iterator pointing to the beginning of the column * data. Must be a random access iterator. * @param bias_iterator Iterator pointing to the beginning of the bias data. * Must be a random access iterator. * @param length The number of (row, column, bias) entries. * @param ignore_diagonal If true, entries on the diagonal of the sparse * matrix are ignored. */ template <class ItRow, class ItCol, class ItBias> AdjVectorBQM(ItRow row_iterator, ItCol col_iterator, ItBias bias_iterator, size_type length, bool ignore_diagonal = false) { // determine the number of variables so we can allocate adj if (length > 0) { size_type max_label = std::max( *std::max_element(row_iterator, row_iterator + length), *std::max_element(col_iterator, col_iterator + length)); adj.resize(max_label + 1); } // we can get 5-10% speedup on dense problems by counting the degrees // and using that to reserve the neighborhood vectors. However, since // it uses more memory and slows down sparse problems, we don't. 
// add the values to the adjacency, not worrying about order or // duplicates for (size_type i = 0; i < length; i++) { if (*row_iterator == *col_iterator) { // linear bias if (!ignore_diagonal) { linear(*row_iterator) += *bias_iterator; } } else { // quadratic bias adj[*row_iterator].first.emplace_back(*col_iterator, *bias_iterator); adj[*col_iterator].first.emplace_back(*row_iterator, *bias_iterator); } ++row_iterator; ++col_iterator; ++bias_iterator; } // now sort each neighborhood and remove duplicates for (variable_type v = 0; v < adj.size(); ++v) { auto span = neighborhood(v); // by default sort looks at first element in pair std::sort(span.first, span.second); // now remove any duplicate variables, adding the biases auto it = adj[v].first.begin(); while (it + 1 < adj[v].first.end()) { if (it->first == (it + 1)->first) { it->second += (it + 1)->second; adj[v].first.erase(it + 1); } else { ++it; } } } } /// Add one (disconnected) variable to the BQM and return its index. variable_type add_variable() { adj.resize(adj.size() + 1); return adj.size() - 1; } /// Get the degree of variable `v`. 
    size_type degree(variable_type v) const { return adj[v].first.size(); }

    /// Deprecated alias for linear(v); kept for backward compatibility.
    [[deprecated("Use AdjVectorBQM::linear(v)")]] bias_type get_linear(
            variable_type v) const {
        return linear(v);
    }

    /**
     * Look up the quadratic bias between `u` and `v`.
     *
     * @returns (bias, true) when the interaction exists, (0, false) otherwise.
     *
     * Binary search over u's sorted neighborhood; O(log d) in the degree of u.
     */
    std::pair<bias_type, bool> get_quadratic(variable_type u,
                                             variable_type v) const {
        assert(u >= 0 && u < adj.size());
        assert(v >= 0 && v < adj.size());
        assert(u != v);

        auto span = neighborhood(u);
        auto low = std::lower_bound(span.first, span.second, v,
                                    utils::comp_v<V, B>);

        // lower_bound may land past the end or on a larger variable when v
        // is absent from the neighborhood.
        if (low == span.second || low->first != v)
            return std::make_pair(0, false);
        return std::make_pair(low->second, true);
    }

    /// Mutable reference to the linear bias of variable `v`.
    bias_type &linear(variable_type v) {
        assert(v >= 0 && v < adj.size());
        return adj[v].second;
    }

    /// Read-only reference to the linear bias of variable `v`.
    const bias_type &linear(variable_type v) const {
        assert(v >= 0 && v < adj.size());
        return adj[v].second;
    }

    /// Mutable iterator pair over u's (out-variable, bias) neighborhood.
    std::pair<outvars_iterator, outvars_iterator> neighborhood(
            variable_type u) {
        assert(u >= 0 && u < adj.size());
        return std::make_pair(adj[u].first.begin(), adj[u].first.end());
    }

    /// Const iterator pair over u's (out-variable, bias) neighborhood.
    std::pair<const_outvars_iterator, const_outvars_iterator> neighborhood(
            variable_type u) const {
        assert(u >= 0 && u < adj.size());
        return std::make_pair(adj[u].first.cbegin(), adj[u].first.cend());
    }

    /**
     * The neighborhood of variable `v`, starting at `start`.
     *
     * @param v A variable.
     * @param start The neighborhood will start with the first out variable
     *     that does not compare less than `start`.
     *
     * @returns A pair of iterators pointing to the start and end of the
     *     neighborhood.
     */
    std::pair<const_outvars_iterator, const_outvars_iterator> neighborhood(
            variable_type v, variable_type start) const {
        auto span = neighborhood(v);
        auto low = std::lower_bound(span.first, span.second, start,
                                    utils::comp_v<V, B>);
        return std::make_pair(low, span.second);
    }

    size_type num_variables() const { return adj.size(); }

    /// Number of interactions; each edge is stored in both endpoints'
    /// neighborhoods, hence the division by 2.
    size_type num_interactions() const {
        size_type count = 0;
        for (auto it = adj.begin(); it != adj.end(); ++it)
            count += it->first.size();
        return count / 2;
    }

    /**
     * Remove the last variable and all of its interactions.
     *
     * @returns The number of variables remaining after the pop.
     */
    variable_type pop_variable() {
        assert(adj.size() > 0);

        variable_type v = adj.size() - 1;

        // remove v from all of its neighbor's neighborhoods
        for (auto it = adj[v].first.cbegin(); it != adj[v].first.cend();
             ++it) {
            auto span = neighborhood(it->first);
            auto low = std::lower_bound(span.first, span.second, v,
                                        utils::comp_v<V, B>);
            adj[it->first].first.erase(low);
        }

        adj.pop_back();

        return adj.size();
    }

    /**
     * Remove the interaction between `u` and `v` (from both neighborhoods).
     *
     * @returns true if the interaction existed and was removed.
     */
    bool remove_interaction(variable_type u, variable_type v) {
        assert(u >= 0 && u < adj.size());
        assert(v >= 0 && v < adj.size());

        auto span = neighborhood(u);
        auto low = std::lower_bound(span.first, span.second, v,
                                    utils::comp_v<V, B>);

        bool exists = !(low == span.second || low->first != v);

        if (exists) {
            adj[u].first.erase(low);

            // the edge is stored symmetrically, so erase the mirror entry too
            span = neighborhood(v);
            low = std::lower_bound(span.first, span.second, u,
                                   utils::comp_v<V, B>);

            assert(!(low == span.second || low->first != u) == exists);

            adj[v].first.erase(low);
        }

        return exists;
    }

    /// Deprecated alias for `linear(v) = b`; kept for backward compatibility.
    [[deprecated("Use AdjVectorBQM::linear(v)")]] void set_linear(
            variable_type v, bias_type b) {
        assert(v >= 0 && v < adj.size());
        linear(v) = b;
    }

    /**
     * Set (insert or overwrite) the quadratic bias between `u` and `v`.
     *
     * Both mirrored neighborhood entries are updated so the adjacency stays
     * symmetric and sorted.
     *
     * @returns Always true (kept consistent with AdjArrayBQM's interface).
     */
    bool set_quadratic(variable_type u, variable_type v, bias_type b) {
        assert(u >= 0 && u < adj.size());
        assert(v >= 0 && v < adj.size());
        assert(u != v);

        auto span = neighborhood(u);
        auto low = std::lower_bound(span.first, span.second, v,
                                    utils::comp_v<V, B>);

        bool exists = !(low == span.second || low->first != v);

        if (exists) {
            low->second = b;
        } else {
            // emplace at lower_bound keeps the neighborhood sorted
            adj[u].first.emplace(low, v, b);
        }

        span = neighborhood(v);
        low = std::lower_bound(span.first, span.second, u,
                               utils::comp_v<V, B>);

        assert(!(low == span.second || low->first != u) == exists);

        if (exists) {
            low->second = b;
        } else {
            adj[v].first.emplace(low, u, b);
        }

        // to be consistent with AdjArrayBQM, we return whether the value was
        // set
        return true;
    }
};
}  // namespace dimod

#endif  // DIMOD_ADJVECTORBQM_H_
column_matrix.h
/*!
 * Copyright 2017 by Contributors
 * \file column_matrix.h
 * \brief Utility for fast column-wise access
 * \author Philip Cho
 */

#ifndef XGBOOST_COMMON_COLUMN_MATRIX_H_
#define XGBOOST_COMMON_COLUMN_MATRIX_H_

#include <limits>
#include <vector>
#include "hist_util.h"

namespace xgboost {
namespace common {

/*! \brief column type */
enum ColumnType { kDenseColumn, kSparseColumn };

/*! \brief a column storage, to be used with ApplySplit. Note that each
    bin id is stored as index[i] + index_base.
    This is a non-owning view; the backing arrays live in ColumnMatrix. */
class Column {
 public:
  Column(ColumnType type, const uint32_t* index, uint32_t index_base,
         const size_t* row_ind, size_t len)
      : type_(type),
        index_(index),
        index_base_(index_base),
        row_ind_(row_ind),
        len_(len) {}
  size_t Size() const { return len_; }
  // global bin id = per-feature bin id + this feature's first global bin id
  uint32_t GetGlobalBinIdx(size_t idx) const {
    return index_base_ + index_[idx];
  }
  uint32_t GetFeatureBinIdx(size_t idx) const { return index_[idx]; }
  // column.GetFeatureBinIdx(idx) + column.GetBaseIdx(idx) ==
  // column.GetGlobalBinIdx(idx)
  uint32_t GetBaseIdx() const { return index_base_; }
  ColumnType GetType() const { return type_; }
  // Dense columns store one slot per row, so the slot index IS the row id;
  // sparse columns keep an explicit row index array.
  size_t GetRowIdx(size_t idx) const {
    // clang-tidy worries that row_ind_ might be a nullptr, which is possible,
    // but low level structure is not safe anyway.
    return type_ == ColumnType::kDenseColumn ? idx : row_ind_[idx];  // NOLINT
  }
  // Dense columns mark missing entries with the uint32_t max sentinel
  // (written by ColumnMatrix::Init before rows are filled in).
  bool IsMissing(size_t idx) const {
    return index_[idx] == std::numeric_limits<uint32_t>::max();
  }
  const size_t* GetRowData() const { return row_ind_; }

 private:
  ColumnType type_;
  const uint32_t* index_;
  uint32_t index_base_;
  const size_t* row_ind_;
  const size_t len_;
};

/*! \brief a collection of columns, with support for construction from
    GHistIndexMatrix. */
class ColumnMatrix {
 public:
  // get number of features
  inline bst_uint GetNumFeature() const {
    return static_cast<bst_uint>(type_.size());
  }

  // construct column matrix from GHistIndexMatrix.
  // sparse_threshold: a feature whose nonzero fraction is below this value is
  // stored as a sparse column (explicit row ids), otherwise as a dense column.
  inline void Init(const GHistIndexMatrix& gmat, double sparse_threshold) {
    const int32_t nfeature = static_cast<int32_t>(gmat.cut.row_ptr.size() - 1);
    const size_t nrow = gmat.row_ptr.size() - 1;

    // identify type of each column
    feature_counts_.resize(nfeature);
    type_.resize(nfeature);
    std::fill(feature_counts_.begin(), feature_counts_.end(), 0);
    uint32_t max_val = std::numeric_limits<uint32_t>::max();
    // per-feature bin ids must fit in uint32_t (max_val is the missing-value
    // sentinel, so a feature may not use that many bins)
    for (bst_uint fid = 0; fid < nfeature; ++fid) {
      CHECK_LE(gmat.cut.row_ptr[fid + 1] - gmat.cut.row_ptr[fid], max_val);
    }

    gmat.GetFeatureCounts(&feature_counts_[0]);
    // classify features
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      if (static_cast<double>(feature_counts_[fid])
                   < sparse_threshold * nrow) {
        type_[fid] = kSparseColumn;
      } else {
        type_[fid] = kDenseColumn;
      }
    }

    // want to compute storage boundary for each feature
    // using variants of prefix sum scan
    // (dense columns reserve one slot per row; sparse ones only their count)
    boundary_.resize(nfeature);
    size_t accum_index_ = 0;
    size_t accum_row_ind_ = 0;
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      boundary_[fid].index_begin = accum_index_;
      boundary_[fid].row_ind_begin = accum_row_ind_;
      if (type_[fid] == kDenseColumn) {
        accum_index_ += static_cast<size_t>(nrow);
        accum_row_ind_ += static_cast<size_t>(nrow);
      } else {
        accum_index_ += feature_counts_[fid];
        accum_row_ind_ += feature_counts_[fid];
      }
      boundary_[fid].index_end = accum_index_;
      boundary_[fid].row_ind_end = accum_row_ind_;
    }

    index_.resize(boundary_[nfeature - 1].index_end);
    row_ind_.resize(boundary_[nfeature - 1].row_ind_end);

    // store least bin id for each feature
    index_base_.resize(nfeature);
    for (bst_uint fid = 0; fid < nfeature; ++fid) {
      index_base_[fid] = gmat.cut.row_ptr[fid];
    }

    // pre-fill index_ for dense columns
    #pragma omp parallel for
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      if (type_[fid] == kDenseColumn) {
        const size_t ibegin = boundary_[fid].index_begin;
        uint32_t* begin = &index_[ibegin];
        uint32_t* end = begin + nrow;
        std::fill(begin, end, std::numeric_limits<uint32_t>::max());
        // max() indicates missing values
      }
    }

    // loop over all rows and fill column entries
    // num_nonzeros[fid] = how many nonzeros have this feature accumulated so far?
    std::vector<size_t> num_nonzeros;
    num_nonzeros.resize(nfeature);
    std::fill(num_nonzeros.begin(), num_nonzeros.end(), 0);
    for (size_t rid = 0; rid < nrow; ++rid) {
      const size_t ibegin = gmat.row_ptr[rid];
      const size_t iend = gmat.row_ptr[rid + 1];
      size_t fid = 0;
      for (size_t i = ibegin; i < iend; ++i) {
        const uint32_t bin_id = gmat.index[i];
        // bin ids within a row appear in increasing feature order, so we can
        // advance fid monotonically until bin_id falls into its bin range
        while (bin_id >= gmat.cut.row_ptr[fid + 1]) {
          ++fid;
        }
        if (type_[fid] == kDenseColumn) {
          uint32_t* begin = &index_[boundary_[fid].index_begin];
          begin[rid] = bin_id - index_base_[fid];
        } else {
          uint32_t* begin = &index_[boundary_[fid].index_begin];
          begin[num_nonzeros[fid]] = bin_id - index_base_[fid];
          row_ind_[boundary_[fid].row_ind_begin + num_nonzeros[fid]] = rid;
          ++num_nonzeros[fid];
        }
      }
    }
  }

  /* Fetch an individual column. This code should be used with
     XGBOOST_TYPE_SWITCH to determine type of bin id's.
     Returns a non-owning view into this matrix's storage. */
  inline Column GetColumn(unsigned fid) const {
    Column c(type_[fid],
             &index_[boundary_[fid].index_begin],
             index_base_[fid],
             (type_[fid] == ColumnType::kSparseColumn ?
              &row_ind_[boundary_[fid].row_ind_begin] : nullptr),
             boundary_[fid].index_end - boundary_[fid].index_begin);
    return c;
  }

 private:
  struct ColumnBoundary {
    // indicate where each column's index and row_ind is stored.
    // index_begin and index_end are logical offsets, so they should be
    // converted to actual offsets by scaling with packing_factor_
    size_t index_begin;
    size_t index_end;
    size_t row_ind_begin;
    size_t row_ind_end;
  };

  std::vector<size_t> feature_counts_;
  std::vector<ColumnType> type_;
  SimpleArray<uint32_t> index_;  // index_: may store smaller integers; needs padding
  SimpleArray<size_t> row_ind_;
  std::vector<ColumnBoundary> boundary_;

  // index_base_[fid]: least bin id for feature fid
  std::vector<uint32_t> index_base_;
};

}  // namespace common
}  // namespace xgboost
#endif  // XGBOOST_COMMON_COLUMN_MATRIX_H_
test.c
#include <stdio.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"

// enable tests
#define CHECK 1
#define DEBUG 0

#define N (992)

// Reset host arrays to known values: A=0, C=1, D=i, E=-i (B left untouched).
#define INIT() INIT_LOOP(N, {A[i] = 0; C[i] = 1; D[i] = i; E[i] = -i;})

// Exercises the OpenMP device-memory runtime API:
//   * default-device getters/setters (including inside a task)
//   * omp_target_alloc / omp_target_memcpy / omp_target_free
//   * omp_target_is_present / omp_target_associate_ptr / disassociate_ptr
// The test's "result" is the printed transcript plus two VERIFY checks.
int main(void){
#if CHECK
  check_offloading();
#endif

  /*
   * Default device
   */
  printf("Is%s initial device\n", omp_is_initial_device() ? "" : " not");
  printf("Initial device: %d\n", omp_get_initial_device());
  omp_set_default_device(1);
  printf("Default device before task: %d\n", omp_get_default_device());
  #pragma omp task
  {
    // default-device is a per-task ICV, so changes inside the task are
    // not expected to leak back to the generating task
    printf("Default device inside task: %d\n", omp_get_default_device());
    omp_set_default_device(2);
    printf("Default device inside task after resetting: %d\n",
           omp_get_default_device());
  }
  printf("Default device outside task: %d\n", omp_get_default_device());

  // default device can set to whatever, if target fails, it goes to the host
  const int default_device = 0;
  omp_set_default_device(default_device);

  // default device for omp target call MUST be >= 0 and <omp_get_num_devices() or
  // the initial device. So when there are no devices, it must be the initial device
  int default_device_omp_target_call = default_device;
  if (omp_get_num_devices() == 0) {
    default_device_omp_target_call = omp_get_initial_device();
  }

#if DEBUG
  printf("test on machine with %d devices\n", omp_get_num_devices());
#endif

  /*
   * Target alloc & target memcpy
   */
  double A[N], B[N], C[N], D[N], E[N];
  double *pA, *pB, *pC, *pD, *pE;

  // map ptrs
  pA = &A[0];
  pB = &B[0];
  pC = &C[0];
  pD = &D[0];
  pE = &E[0];

  INIT();

  // Deliberately bias the host pointers backwards; the matching positive
  // offsets are then passed to omp_target_memcpy so the copies still land
  // on the real arrays (exercises the dst_offset/src_offset parameters).
  pA = pA - 10;
  pC = pC - 20;
  pD = pD - 30;

  void *device_A = omp_target_alloc(N*sizeof(double), default_device_omp_target_call);
  void *device_C = omp_target_alloc(N*sizeof(double), default_device_omp_target_call);
  void *device_D = omp_target_alloc(N*sizeof(double), default_device_omp_target_call);
  // Same trick on the device side: biased device pointers, un-biased by the
  // explicit offsets below and by the +100/+200/+300 indexing in the kernel.
  double *dpA = (double *) device_A - 100;
  double *dpC = (double *) device_C - 200;
  double *dpD = (double *) device_D - 300;

  printf("omp_target_alloc %s\n",
         device_A && device_C && device_D ? "succeeded" : "failed");

  // host -> device: args are (dst, src, bytes, dst_offset, src_offset,
  //                           dst_device, src_device)
  omp_target_memcpy(dpC, pC, N*sizeof(double), 200*sizeof(double),
                    20*sizeof(double), default_device_omp_target_call,
                    omp_get_initial_device());
  omp_target_memcpy(dpD, pD, N*sizeof(double), 300*sizeof(double),
                    30*sizeof(double), default_device_omp_target_call,
                    omp_get_initial_device());

  #pragma omp target is_device_ptr(dpA, dpC, dpD) device(default_device)
  {
    #pragma omp parallel for schedule(static,1)
    for (int i = 0; i < 992; i++)
      dpA[i+100] = dpC[i+200] + dpD[i+300] + 1;  // A = C + D + 1 = i + 2
  }

  // device -> host
  omp_target_memcpy(pA, dpA, N*sizeof(double), 10*sizeof(double),
                    100*sizeof(double), omp_get_initial_device(),
                    default_device_omp_target_call);

  int fail = 0;
  VERIFY(0, N, A[i], (double)(i+2));
  if (fail) {
    printf ("Test omp_target_memcpy: Failed\n");
  } else {
    printf ("Test omp_target_memcpy: Succeeded\n");
  }

  /*
   * target_is_present and target_associate/disassociate_ptr
   */
  INIT();

  if (offloading_disabled()) {
    // If offloading is disabled just recreate the messages so that this can
    // also be tested with no device.
    printf("C is not present, associating it...\n");
    printf("omp_target_associate_ptr C %s\n", 1 ? "succeeded" : "failed");
  } else if (!omp_target_is_present(C, default_device_omp_target_call)) {
    printf("C is not present, associating it...\n");
    int rc = omp_target_associate_ptr(C, dpC, N*sizeof(double),
                                      200*sizeof(double),
                                      default_device_omp_target_call);
    printf("omp_target_associate_ptr C %s\n", !rc ? "succeeded" : "failed");
  }

  if (offloading_disabled()) {
    // If offloading is disabled just recreate the messages so that this can
    // also be tested with no device.
    printf("D is not present, associating it...\n");
    printf("omp_target_associate_ptr D %s\n", 1 ? "succeeded" : "failed");
  } else if (!omp_target_is_present(D, default_device_omp_target_call)) {
    printf("D is not present, associating it...\n");
    int rc = omp_target_associate_ptr(D, dpD, N*sizeof(double),
                                      300*sizeof(double),
                                      default_device_omp_target_call);
    printf("omp_target_associate_ptr D %s\n", !rc ? "succeeded" : "failed");
  }

  #pragma omp target data map(from: C, D) device(default_device)
  {
    printf("Inside target data: A is%s present\n",
           (omp_target_is_present(A, default_device_omp_target_call) &&
            !offloading_disabled()) ? "" : " not");
    printf("Inside target data: C is%s present\n",
           omp_target_is_present(C, default_device_omp_target_call) ? "" : " not");
    printf("Inside target data: D is%s present\n",
           omp_target_is_present(D, default_device_omp_target_call) ? "" : " not");

    // C and D are mapped "from", so there is no copy from host to device.
    // If the association was successful, their corresponding device arrays
    // are already populated from previous omp_target_memcpy with the correct
    // values and the following target for-loop must yield the correct results.
    #pragma omp target map(from: A) device(default_device)
    {
      #pragma omp parallel for schedule(static,1)
      for (int i = 0; i < 992; i++)
        A[i] = C[i] + D[i] + 1;
    }
  }

  if (offloading_disabled()) {
    printf("C is present, disassociating it...\n");
    printf("omp_target_disassociate_ptr C %s\n", 1 ? "succeeded" : "failed");
  } else if (omp_target_is_present(C, default_device_omp_target_call)) {
    printf("C is present, disassociating it...\n");
    int rc = omp_target_disassociate_ptr(C, default_device_omp_target_call);
    printf("omp_target_disassociate_ptr C %s\n", !rc ? "succeeded" : "failed");
  }

  if (offloading_disabled()) {
    printf("D is present, disassociating it...\n");
    printf("omp_target_disassociate_ptr D %s\n", 1 ? "succeeded" : "failed");
  } else if (omp_target_is_present(D, default_device_omp_target_call)) {
    printf("D is present, disassociating it...\n");
    int rc = omp_target_disassociate_ptr(D, default_device_omp_target_call);
    printf("omp_target_disassociate_ptr D %s\n", !rc ? "succeeded" : "failed");
  }

  fail = 0;
  VERIFY(0, N, A[i], (double)(i+2));
  if (fail) {
    printf ("Test omp_target_associate_ptr: Failed\n");
  } else {
    printf ("Test omp_target_associate_ptr: Succeeded\n");
  }

  // release the raw device buffers allocated with omp_target_alloc
  omp_target_free(device_A, default_device_omp_target_call);
  omp_target_free(device_C, default_device_omp_target_call);
  omp_target_free(device_D, default_device_omp_target_call);

  return 0;
}
pasha.h
#ifndef PASHA_H
#define PASHA_H

#include <iostream>
#include <fstream>
#include <cmath>
#include <string>
#include <vector>
#include <algorithm>
#include <initializer_list>
#include <map>
#include <cstdlib>
#include <iomanip>
#include <cstdint>
#include <omp.h>
#include <limits>

// NOTE: the former `using namespace std;` was removed from this header.
// Besides being bad header hygiene, it made the alias `byte` below ambiguous
// with std::byte under C++17 (both visible at global scope).
using unsigned_int = uint64_t;
using byte = uint8_t;

/*
 * PASHA: randomized parallel hitting-set computation on a complete
 * de Bruijn graph of order k over the alphabet ACGT.
 *
 * Ownership note: the raw arrays allocated in the constructor and in
 * HittingRandomParallel are never freed (no destructor) -- unchanged from
 * the original design; callers treat a PASHA instance as living for the
 * whole program run.
 */
class PASHA {
  public:
    byte* finished;
    byte* pick;
    byte* used;
    double delta;
    double epsilon;
    double* hittingNumArray;
    float** D;
    float* Fcurr;
    float* Fprev;
    unsigned_int ALPHABET_SIZE;
    unsigned_int edgeCount;
    unsigned_int edgeNum;
    unsigned_int k;
    unsigned_int l;
    unsigned_int h;
    double count;
    int exit;  // -1 when no live edge remains (see calculateHittingNumberParallel)
    unsigned_int total;
    unsigned_int curr;
    unsigned_int vertexCount;
    unsigned_int vertexExp;
    unsigned_int vertexExp2;
    unsigned_int vertexExp3;
    unsigned_int vertexExpMask;
    unsigned_int vertexExp_1;
    byte* edgeArray;
    byte* stageArray;
    int* topoSort;
    std::map<char, unsigned_int> alphabetMap;
    std::string ALPHABET;
    std::vector<unsigned_int> stageVertices;

    PASHA(unsigned_int argK) {
    /**
    Definition of a graph object. Generates a graph of order k, creates an
    empty edge index array, calculates number of edges, builds a
    character-index map.
    @param argK: Argument passed as k-mer length.
    */
        ALPHABET = "ACGT";
        ALPHABET_SIZE = 4;
        k = argK;
        edgeNum = static_cast<unsigned_int>(std::pow(ALPHABET_SIZE, k));
        edgeArray = new byte[static_cast<unsigned_int>(edgeNum)];
        std::cout << k << ALPHABET_SIZE << edgeNum << std::endl;
        generateGraph(k);
        // Populate the member map once (the original declared a dead local
        // map here and rebuilt it on every getIndex() call).
        for (unsigned_int a = 0; a < ALPHABET_SIZE; a++)
            alphabetMap.insert(std::pair<char, unsigned_int>(ALPHABET[a], a));
    }

    void generateGraph(unsigned_int k) {
    /**
    Generates a complete de Bruijn graph of order k.
    @param k: Desired k-mer length (order of complete graph).
    */
        for (unsigned_int i = 0; i < edgeNum; i++) edgeArray[i] = 1;
        edgeCount = edgeNum;
        vertexCount = edgeNum / ALPHABET_SIZE;
    }

    char getChar(unsigned_int i) {
    /**
    Gets alphabet character from index.
    @param i: Index of character.
    @return The character in the alphabet.
    */
        return ALPHABET[i];
    }

    std::string getLabel(unsigned_int i) {
    /**
    Gets label of the input edge index (base-4 digits, most significant
    first).
    @param i: Index of edge.
    @return The label of the edge.
    */
        std::string finalString = "";
        for (unsigned_int j = 0; j < k; j++) {
            finalString = getChar((i % ALPHABET_SIZE)) + finalString;
            i = i / ALPHABET_SIZE;
        }
        return finalString;
    }

    unsigned_int getIndex(std::string label) {
    /**
    Gets index of the input edge label (inverse of getLabel).
    @param label: Label of edge.
    @return The index of the edge.
    */
        unsigned_int index = 0;
        for (unsigned_int j = 0; j < k; j++) {
            // uses the character-index map built once in the constructor
            index += alphabetMap[label[j]] * std::pow(4, k-j-1);
        }
        return index;
    }

    int maxLength() {
    /**
    Calculates the length of the maximum length path in the graph.
    Requires topologicalSort() to have been run (reads topoSort).
    @return maxDepth: Maximum length.
    */
        std::vector<int> depth(vertexExp);
        int maxDepth = -1;
        for (unsigned_int i = 0; i < vertexExp; i++) {
            int maxVertDepth = -1;
            for (unsigned_int j = 0; j < ALPHABET_SIZE; j++) {
                unsigned_int edgeIndex = topoSort[i] + j * vertexExp;
                unsigned_int vertexIndex = edgeIndex / ALPHABET_SIZE;
                if ((depth[vertexIndex] > maxVertDepth) &&
                    (edgeArray[edgeIndex] == 1))
                    maxVertDepth = depth[vertexIndex];
            }
            depth[topoSort[i]] = maxVertDepth + 1;
            if (depth[topoSort[i]] > maxDepth) maxDepth = depth[topoSort[i]];
        }
        return maxDepth;
    }

    void removeEdge(unsigned_int i) {
    /**
    Removes an edge from the graph (idempotent: only decrements the count
    the first time).
    @param i: Index of edge.
    */
        if (edgeArray[i] == 1) edgeCount--;
        edgeArray[i] = 0;
    }

    void topologicalSort() {
    /**
    Traverses the graph in topological order, filling topoSort.
    On a cycle, topoSort is set to NULL (the previously allocated array is
    intentionally left to the process, matching the original behavior).
    */
        for (unsigned_int i = 0; i < vertexExp; i++) {
            used[i] = false;
            finished[i] = false;
        }
        int index = 0;
        for (unsigned_int i = 0; i < vertexExp; i++) {
            if (used[i] == false) {
                index = depthFirstSearch(index, i);
                if (index == -1) { topoSort = NULL; return; }
            }
        }
    }

    int depthFirstSearch(int index, unsigned_int u) {
    /**
    Depth-first search of a given index of an edge.
    @param index: Depth of recursion, u: Index of edge.
    @return -1: The search cycles, index+1: Current depth.
    */
        used[u] = true;
        bool cycle = false;
        for (unsigned_int v : getAdjacent(u)) {
            if (used[v] == true && finished[v] == false) cycle = true;
            if (used[v] == false) {
                index = depthFirstSearch(index, v);
                cycle = cycle || (index == -1);
            }
        }
        finished[u] = true;
        topoSort[index] = u;
        if (cycle) return -1;
        else return index + 1;
    }

    std::vector<unsigned_int> getAdjacent(unsigned_int v) {
    /**
    Get adjacent vertices to a given index of a vertex.
    @param v: Index of vertex.
    @return rc: Array of adjacent vertices.
    */
        unsigned_int count = 0;
        unsigned_int adjVertex[4];  // ALPHABET_SIZE is fixed at 4
        for (unsigned_int i = 0; i < ALPHABET_SIZE; i++) {
            unsigned_int index = v + i * vertexExp;
            if (edgeArray[index] == 1) adjVertex[count++] = index / ALPHABET_SIZE;
        }
        std::vector<unsigned_int> rc(count);
        for (unsigned_int i = 0; i < count; i++) rc[i] = adjVertex[i];
        return rc;
    }

    unsigned_int HittingRandomParallel(unsigned_int L, const char *hittingPath,
                                       unsigned_int threads) {
    /**
    Performs hitting set calculations with parallelization and with
    randomization, counting L-k+1-long paths.
    @param L: Sequence length, hittingPath: Output file destination,
        threads: Number of OpenMP threads.
    @return hittingCount: Size of hitting set.

    Fixes vs. the original: the loop-carried temporaries previously shared
    across threads (`i`, `j`) are now per-iteration locals, removing data
    races. std::rand() is still called under the critical sections, so the
    random sequence (and hence the result) remains order-dependent across
    thread schedules, as before.
    */
        std::srand(1);
        omp_set_dynamic(0);
        vertexExp = std::pow(ALPHABET_SIZE, k-1);
        std::ofstream hittingStream(hittingPath);
        int hittingCount = 0;
        l = L-k+1;
        delta = 1/(double)l;
        epsilon = (1-8*(delta))/4;
        double alpha = 1 - 4*delta - 2*epsilon;
        std::cout << "Alpha: " << 1/alpha << std::endl;
        std::cout << "Delta: " << delta << std::endl;
        std::cout << "Epsilon: " << epsilon << std::endl;
        hittingNumArray = new double[(unsigned_int)edgeNum];
        stageArray = new byte[(unsigned_int)edgeNum];
        used = new byte[vertexExp];
        finished = new byte[vertexExp];
        pick = new byte[(unsigned_int)edgeNum];
        topoSort = new int[vertexExp];
        D = new float*[l + 1];
        for (unsigned_int d = 0; d < l+1; d++) D[d] = new float[vertexExp];
        Fcurr = new float[vertexExp];
        Fprev = new float[vertexExp];
        calculatePaths(l, threads);
        unsigned_int imaxHittingNum = calculateHittingNumberParallel(l, false, threads);
        std::cout << "Max hitting number: " << hittingNumArray[imaxHittingNum] << std::endl;
        h = findLog((1.0+epsilon), hittingNumArray[imaxHittingNum]);
        double prob = delta/(double)l;
        while (h > 0) {
            total = 0;
            unsigned_int hittingCountStage = 0;
            double pathCountStage = 0;
            calculatePaths(l, threads);
            imaxHittingNum = calculateHittingNumberParallel(l, true, threads);
            if (exit == -1) break;
            stageVertices = pushBackVector();

            // Greedy pass: pick vertices whose hitting number dominates.
            #pragma omp parallel for num_threads(threads)
            for (unsigned_int it = 0; it < stageVertices.size(); it++) {
                unsigned_int u = stageVertices[it];  // per-thread local (was a shared race)
                #pragma omp critical
                if ((pick[u] == false) &&
                    (hittingNumArray[u] > (std::pow(delta, 3) * total))) {
                    stageArray[u] = 0;
                    pick[u] = true;
                    hittingCountStage++;
                    pathCountStage += hittingNumArray[u];
                }
            }

            // Randomized pass over vertex pairs.
            #pragma omp parallel for collapse (2) num_threads(threads)
            for (unsigned_int it = 0; it < stageVertices.size(); it++) {
                for (unsigned_int jt = 0; jt < stageVertices.size(); jt++) {
                    unsigned_int u = stageVertices[it];  // per-thread locals
                    #pragma omp critical
                    if (pick[u] == false) {
                        if (((double) std::rand() / (RAND_MAX)) <= prob) {
                            stageArray[u] = 0;
                            pick[u] = true;
                            hittingCountStage += 1;
                            pathCountStage += hittingNumArray[u];
                        }
                        unsigned_int w = stageVertices[jt];
                        if (pick[w] == false) {
                            if (((double) std::rand() / (RAND_MAX)) <= prob) {
                                stageArray[w] = 0;
                                pick[w] = true;
                                hittingCountStage += 1;
                                pathCountStage += hittingNumArray[w];
                            }
                            else pick[u] = false;
                        }
                    }
                }
            }
            hittingCount += hittingCountStage;
            // (an orphaned `#pragma omp barrier` sat here; outside a parallel
            // region it is a no-op and has been removed)
            if (pathCountStage >= hittingCountStage *
                    std::pow((1.0 + epsilon), h) * (1 - 4*delta - 2*epsilon)) {
                // Commit this stage: actually remove the picked edges.
                for (unsigned_int it = 0; it < stageVertices.size(); it++) {
                    unsigned_int u = stageVertices[it];
                    if (pick[u] == true) {
                        removeEdge(u);
                        std::string label = getLabel(u);
                        hittingStream << label << "\n";
                    }
                }
                h--;
            }
            else hittingCount -= hittingCountStage;  // roll back the stage
        }
        hittingStream.close();
        topologicalSort();
        std::cout << "Length of longest remaining path: " << maxLength() << "\n";
        return hittingCount;
    }

    unsigned_int calculateHittingNumberParallel(unsigned_int L, bool random,
                                                unsigned_int threads) {
    /**
    Calculates hitting number of all edges, counting paths of length L-k+1,
    in parallel; when `random` is set, also marks the stage candidates whose
    hitting number lies in [(1+eps)^(h-1), (1+eps)^h].
    @param L: Sequence length.
    @return imaxHittingNum: Index of vertex with maximum hitting number.
    Side effect: `exit` stays -1 when no live edge has a positive hitting
    number (termination signal for the caller).
    */
        omp_set_dynamic(0);
        double maxHittingNum = 0;
        unsigned_int imaxHittingNum = 0;
        unsigned_int count = 0;
        exit = -1;
        #pragma omp parallel for num_threads(threads)
        for (unsigned_int i = 0; i < (unsigned_int)edgeNum; i++) {
            if (random == true) {
                if (((hittingNumArray[i]) >= std::pow((1.0+epsilon), h-1)) &&
                    ((hittingNumArray[i]) <= std::pow((1.0+epsilon), h))) {
                    stageArray[i] = 1;
                    pick[i] = false;
                    // shared accumulators: atomics fix the original data race
                    #pragma omp atomic
                    total += hittingNumArray[i] * stageArray[i];
                    #pragma omp atomic
                    count++;
                }
                else {
                    stageArray[i] = 0;
                    pick[i] = false;
                }
            }
        }
        // serial max-scan (kept serial: cheap and keeps tie-breaking stable)
        for (unsigned_int i = 0; i < (unsigned_int)edgeNum; i++) {
            if ((hittingNumArray[i])*edgeArray[i] > maxHittingNum) {
                maxHittingNum = hittingNumArray[i];
                imaxHittingNum = i;
                exit = 0;
            }
        }
        return imaxHittingNum;
    }

    int calculatePaths(unsigned_int L, unsigned_int threads) {
    /**
    Calculates number of L-k+1 long paths for all vertices (dynamic
    programming over D, forward counts over Fprev/Fcurr) and accumulates
    hittingNumArray for every edge.
    @param L: Sequence length.
    @return 1: True if path calculation completes.
    */
        omp_set_dynamic(0);
        curr = 1;
        vertexExp2 = vertexExp * 2;
        vertexExp3 = vertexExp * 3;
        vertexExpMask = vertexExp - 1;
        vertexExp_1 = std::pow(ALPHABET_SIZE, k-2);
        // 1.4e-45 (smallest float denormal) is the scaling unit used to keep
        // the products below within float range; divided back out below.
        #pragma omp parallel for num_threads(threads)
        for (unsigned_int i = 0; i < vertexExp; i++) {
            D[0][i] = 1.4e-45;
            Fprev[i] = 1.4e-45;
        }
        for (unsigned_int j = 1; j <= L; j++) {
            #pragma omp parallel for num_threads(threads)
            for (unsigned_int i = 0; i < vertexExp; i++) {
                // sum over the four incoming edges of vertex i
                D[j][i] = edgeArray[i]*D[j-1][(i >> 2)]
                        + edgeArray[i + vertexExp]*D[j-1][((i + vertexExp) >> 2)]
                        + edgeArray[i + vertexExp2]*D[j-1][((i + vertexExp2) >> 2)]
                        + edgeArray[i + vertexExp3]*D[j-1][((i + vertexExp3) >> 2)];
            }
        }
        #pragma omp parallel for num_threads(threads)
        for (unsigned_int i = 0; i < (unsigned_int)edgeNum; i++)
            hittingNumArray[i] = 0;
        while (curr <= L) {
            #pragma omp parallel for num_threads(threads)
            for (unsigned_int i = 0; i < vertexExp; i++) {
                unsigned_int index = (i * 4);
                // sum over the four outgoing edges of vertex i
                Fcurr[i] = (edgeArray[index]*Fprev[index & vertexExpMask]
                          + edgeArray[index + 1]*Fprev[(index + 1) & vertexExpMask]
                          + edgeArray[index + 2]*Fprev[(index + 2) & vertexExpMask]
                          + edgeArray[index + 3]*Fprev[(index + 3) & vertexExpMask]);
            }
            #pragma omp parallel for num_threads(threads)
            for (unsigned_int i = 0; i < (unsigned_int)edgeNum; i++) {
                hittingNumArray[i] +=
                    (Fprev[i % vertexExp]/1.4e-45) *
                    (D[(L-curr)][i / ALPHABET_SIZE]/1.4e-45);
                if (edgeArray[i] == 0) hittingNumArray[i] = 0;
            }
            #pragma omp parallel for num_threads(threads)
            for (unsigned_int i = 0; i < vertexExp; i++) Fprev[i] = Fcurr[i];
            curr++;
        }
        return 1;
    }

    int findLog(double base, double x) {
    /**
    Finds the logarithm of a given number with respect to a given base.
    @param base: Base of logarithm, x: Input number.
    @return Integer logarithm of the number and the given base.
    */
        return (int)(std::log(x) / std::log(base));
    }

    std::vector<unsigned_int> pushBackVector() {
    /** Collects the indices of all vertices marked for the current stage. */
        std::vector<unsigned_int> stageVertices;
        for (unsigned_int i = 0; i < (unsigned_int)edgeNum; i++) {
            if (stageArray[i] == 1) stageVertices.push_back(i);
        }
        return stageVertices;
    }
};
#endif
hecmw_util.c
/***************************************************************************** * Copyright (c) 2019 FrontISTR Commons * This software is released under the MIT License, see LICENSE.txt *****************************************************************************/ #include <stdio.h> #include <string.h> #include <stdarg.h> #include <time.h> #include <ctype.h> #include <errno.h> #include "hecmw_config.h" #include "hecmw_util.h" void HECMW_fprintf(FILE *fp, char *fmt, ...) { va_list ap; va_start(ap, fmt); vfprintf(fp, fmt, ap); va_end(ap); } void HECMW_printerr(char *fmt, ...) { va_list ap; va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); } char *HECMW_get_date(void) { int rc; time_t now; static char static_buf[100]; if (time(&now) == (time_t)-1) return NULL; rc = strftime(static_buf, sizeof(static_buf), "%b %d %H:%M:%S", localtime(&now)); return rc ? static_buf : NULL; } /* thread-save version of HECMW_get_date */ char *HECMW_get_date_r(char *buf, int len) { int rc; time_t now; struct tm result; if (time(&now) == (time_t)-1) return NULL; #if defined(__WIN32__) || defined(__WIN64__) /* localtime_r is not available on Windows */ #pragma omp critical { rc = strftime(buf, len, "%b %d %H:%M:%S", localtime(&now)); } #else if (localtime_r(&now, &result) == NULL) return NULL; rc = strftime(buf, len, "%b %d %H:%M:%S", &result); #endif return rc ? 
buf : NULL; } void HECMW_assert_(int cond, char *cond_str, char *file, int line) { if (!cond) { fprintf(stderr, "%s:%d: Assersion `%s' failed.\n", file, line, cond_str); #ifdef HECMW_SERIAL abort(); #else MPI_Abort(MPI_COMM_WORLD, HECMW_EXIT_ERROR); #endif } } int HECMW_check_condition_(int cond, char *cond_str, int isabort, char *file, int line) { if (cond) return 0; if (isabort) { fprintf(stderr, "%s:%d: Assertion `%s' falied.\n", file, line, cond_str); #ifdef HECMW_SERIAL abort(); #else MPI_Abort(MPI_COMM_WORLD, HECMW_EXIT_ERROR); #endif } return 1; } void HECMW_abort(HECMW_Comm comm) { #ifdef HECMW_SERIAL abort(); #else /* HECMW_comm_is_initialized() ? MPI_Abort(comm, HECMW_EXIT_ERROR) : * abort(); */ if (HECMW_comm_is_initialized()) { MPI_Abort(comm, HECMW_EXIT_ERROR); } else { abort(); } #endif } char *HECMW_toupper(char *s) { char *p; if (s == NULL) return NULL; for (p = s; *p; p++) { *p = (char)toupper(*p); } return s; } char *HECMW_tolower(char *s) { char *p; if (s == NULL) return NULL; for (p = s; *p; p++) { *p = (char)tolower(*p); } return s; } void HECMW_print_error(void) { HECMW_log(HECMW_LOG_ERROR, HECMW_get_errmsg()); } void HECMW_print_vmsg(int loglv, int msgno, const char *fmt, va_list ap) { char msg[HECMW_MSG_LEN + 1]; char vmsg[HECMW_MSG_LEN + 1]; HECMW_snprintf(msg, sizeof(msg), "%s", HECMW_strmsg(msgno)); HECMW_vsnprintf(vmsg, sizeof(vmsg), fmt, ap); if (strlen(vmsg) > 0) { HECMW_snprintf(msg + strlen(msg), sizeof(msg) - strlen(msg), " (%s)", vmsg); } HECMW_log(loglv, msg); } void HECMW_print_msg(int loglv, int msgno, const char *fmt, ...) { va_list ap; va_start(ap, fmt); HECMW_print_vmsg(loglv, msgno, fmt, ap); va_end(ap); } int HECMW_vsnprintf(char *str, size_t size, const char *format, va_list ap) { #ifdef WIN32_MSVC return _vsnprintf(str, size, format, ap); #else return vsnprintf(str, size, format, ap); #endif } int HECMW_snprintf(char *str, size_t size, const char *format, ...) 
{ va_list ap; int rtc; va_start(ap, format); #ifdef WIN32_MSVC rtc = _vsnprintf(str, size, format, ap); #else rtc = vsnprintf(str, size, format, ap); #endif va_end(ap); return rtc; }
time.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#define Length 1.0
#define Temperature_1 1.0
#define Temperature_2 5.0

/* Number of repeated timing experiments; the reported time is the mean.
 * BUG FIX: `numexp` was used but never defined, so the original file did
 * not compile. */
#define numexp 1

/* Explicit finite-difference solver for the 1-D heat equation on a rod,
 * parallelized with OpenMP.  Usage: prog [Time [M [threads]]].
 * Boundary temperatures are fixed; the initial interior temperature is 0. */
int main(int argc, char **argv) {
  /* Time at which the temperature distribution in the rod is wanted. */
  double Time = 1.0;
  /* Number of subdivisions along the coordinate. */
  size_t M = 10;
  /* Number of parallel threads. */
  size_t size = 1;

  if (argc > 1) {
    /* Read the time at which we want the temperature distribution. */
    Time = atof(argv[1]);
    if (Time < 0) {
      printf("Sorry, timemachine hasn't been invented yet!");
      return EXIT_FAILURE;
    }
    if (argc > 2) {
      /* Number of subdivisions along the coordinate. */
      M = atoll(argv[2]);
      if (M < 2) {
        /* Otherwise the method does not converge. */
        printf("Invalid values!\n");
        return EXIT_FAILURE;
      }
      if (argc > 3) {
        size = atoll(argv[3]);
        if (M <= size) {
          /* The coordinate partition is too fine-grained to keep
           * every thread busy. */
          printf("Required number of processes is unreasonable \
compared to coordinate partition!\n");
          return EXIT_FAILURE;
        }
      }
    }
  }

  /* Coordinate step. */
  double h = Length / M;
  /* Time step chosen from the Courant condition. */
  double tau = 0.3 * h * h;
  /* Number of time steps. */
  size_t N = (size_t)(Time / tau);

  /* Temperature arrays for time levels n and n + 1 respectively. */
  double *u0 = (double*) malloc(sizeof(double) * M);
  double *u1 = (double*) malloc(sizeof(double) * M);
  if (u0 == NULL || u1 == NULL) {
    printf("Out of memory!\n");
    free(u0);
    free(u1);
    return EXIT_FAILURE;
  }

  /* Loop counters over time and coordinate. */
  size_t m, n;

  /* Initial condition (f(x) = 0). */
  for (m = 0; m < M; m++) {
    u0[m] = u1[m] = 0.0;
  }

  /* Boundary conditions. */
  u0[0] = u1[0] = Temperature_1;
  u0[M - 1] = u1[M - 1] = Temperature_2;

  /* left_index[i] is the first grid point owned by thread i; the right end
   * of thread i equals the left end of thread i + 1, and left_index[size]
   * closes the last segment.
   * BUG FIX: the original allocated sizeof(size_t) * size + 1 BYTES
   * (operator precedence), i.e. one element short of the size + 1 entries
   * actually written below. */
  size_t *left_index = (size_t*) malloc(sizeof(size_t) * (size + 1));
  if (left_index == NULL) {
    printf("Out of memory!\n");
    free(u0);
    free(u1);
    return EXIT_FAILURE;
  }
  left_index[0] = 1;
  left_index[size] = M - 1;
  for (size_t i = 1; i < size; i++) {
    /* NOTE(review): (M % size) - 2 underflows when M % size < 2 because
     * both operands are unsigned, making the comparison always true.
     * Kept exactly as in the original partitioning — confirm the intended
     * load-balancing rule before changing it. */
    left_index[i] = left_index[i - 1] + (M / size) + ((i - 1) < ((M % size) - 2));
  }

  /* Array of locks: lock[id] guards thread id's left boundary cell,
   * lock[id + size] its right boundary cell. */
  omp_lock_t* lock = (omp_lock_t*) malloc(sizeof(omp_lock_t) * 2 * size);
  if (lock == NULL) {
    printf("Out of memory!\n");
    free(u0);
    free(u1);
    free(left_index);
    return EXIT_FAILURE;
  }
  for (size_t i = 0; i < 2 * size; ++i) {
    omp_init_lock(&lock[i]);
  }

  /* Number of threads that have finished the current time step. */
  size_t epoc = 0;

  double time = 0.0;

  /* Set the thread count for the following parallel region. */
  omp_set_num_threads(size);

  for (size_t j = 0; j < numexp; j++) {
    /* Start the timer. */
    double start = omp_get_wtime();
#pragma omp parallel private(n, m)
    {
      size_t id = omp_get_thread_num();
      /* Time loop. */
      for (n = 0; n < N; n++) {
        /* BUG FIX: the original reset epoc from EVERY thread under a bare
         * `#pragma omp atomic` (invalid for a plain store, and racing with
         * the busy-wait below: a late reset could erase increments already
         * made by faster threads).  Resetting it once inside `single`
         * relies on the construct's implicit barrier so every thread sees
         * the reset before computing. */
#pragma omp single
        epoc = 0;

        /* Explicit four-point scheme. */
        for (m = left_index[id]; m < left_index[id + 1]; ++m) {
          if ((m == left_index[id]) && (id != 0)) {
            /* Snapshot the neighbour's node under its lock. */
            omp_set_lock(&lock[id - 1 + size]);
            double left = u0[left_index[id] - 1];
            omp_unset_lock(&lock[id - 1 + size]);
            /* Update our boundary cell under our own lock. */
            omp_set_lock(&lock[id]);
            u1[m] = u0[m] + 0.3 * (left - 2.0 * u0[m] + u0[m + 1]);
            omp_unset_lock(&lock[id]);
          } else if ((m == left_index[id + 1] - 1) && (id != size - 1)) {
            /* Snapshot the neighbour's node under its lock. */
            omp_set_lock(&lock[id + 1]);
            double right = u0[left_index[id + 1]];
            omp_unset_lock(&lock[id + 1]);
            /* Update our boundary cell under our own lock. */
            omp_set_lock(&lock[id + size]);
            u1[m] = u0[m] + 0.3 * (u0[m - 1] - 2.0 * u0[m] + right);
            omp_unset_lock(&lock[id + size]);
          } else {
            /* BUG FIX: the original executed this statement
             * unconditionally, overwriting the lock-protected boundary
             * updates computed above and making the locks pointless. */
            u1[m] = u0[m] + 0.3 * (u0[m - 1] - 2.0 * u0[m] + u0[m + 1]);
          }
        }

        /* Atomically announce that this thread finished the step. */
#pragma omp atomic
        epoc++;

#pragma omp single
        {
          /* Do not publish the result until every thread has finished.
           * BUG FIX: read epoc under `atomic read` instead of a plain
           * (racy) load. */
          size_t done;
          do {
#pragma omp atomic read
            done = epoc;
            __asm volatile ("pause" ::: "memory");
          } while (done < size);
          /* Publish the new time level by swapping the buffers. */
          double *t = u0;
          u0 = u1;
          u1 = t;
        }
      }
    }
    /* Accumulate the wall-clock time of this experiment. */
    time += omp_get_wtime() - start;
  }

  /* Destroy the locks. */
  for (size_t i = 0; i < 2 * size; ++i) {
    omp_destroy_lock(&lock[i]);
  }

  /* BUG FIX: size is a size_t, so %zu instead of %d. */
  printf("\n %zu %lf\n", size, time / numexp);

  /* Free memory (the original leaked left_index and lock). */
  free(u0);
  free(u1);
  free(left_index);
  free(lock);

  return EXIT_SUCCESS;
}
pr48716.c
/* PR c/48716 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* Regression test: block-scope variables with static storage duration are
   predetermined shared in OpenMP, so under default(none) they must be
   accepted inside parallel and task constructs without an explicit
   data-sharing clause; the automatic variable t is private because it is
   declared inside the construct.  The test only has to compile. */

int
main (void)
{
#pragma omp parallel default(none)
  {
    static int s;  /* static => predetermined shared: no clause required */
    int t = 0;     /* declared inside the construct => private */
#pragma omp atomic
    s++;
    t++;
  }
#pragma omp task default(none)
  {
    static int s;  /* same rule inside a task construct */
    int t = 0;
#pragma omp atomic
    s++;
    t++;
  }
}
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/DependenceFlags.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/BitmaskEnum.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class AddrLabelExpr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
/// class alignas(void *) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: friend class ASTStmtReader; friend class ASTStmtWriter; void *operator new(size_t bytes) noexcept { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) noexcept { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } //===--- Statement bitfields classes ---===// class StmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class Stmt; /// The statement class. unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class NullStmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class NullStmt; unsigned : NumStmtBits; /// True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode unsigned HasLeadingEmptyMacro : 1; /// The location of the semi-colon. SourceLocation SemiLoc; }; class CompoundStmtBitfields { friend class ASTStmtReader; friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; /// The location of the opening "{". SourceLocation LBraceLoc; }; class PUBLIC_DOMAIN_TECHNOLOGYStmtBitfields { friend class ASTStmtReader; friend class PUBLIC_DOMAIN_TECHNOLOGYStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; /// The location of the opening "{". 
SourceLocation LBraceLoc; }; class LabelStmtBitfields { friend class LabelStmt; unsigned : NumStmtBits; SourceLocation IdentLoc; }; class AttributedStmtBitfields { friend class ASTStmtReader; friend class AttributedStmt; unsigned : NumStmtBits; /// Number of attributes. unsigned NumAttrs : 32 - NumStmtBits; /// The location of the attribute. SourceLocation AttrLoc; }; class IfStmtBitfields { friend class ASTStmtReader; friend class IfStmt; unsigned : NumStmtBits; /// True if this if statement is a constexpr if. unsigned IsConstexpr : 1; /// True if this if statement has storage for an else statement. unsigned HasElse : 1; /// True if this if statement has storage for a variable declaration. unsigned HasVar : 1; /// True if this if statement has storage for an init statement. unsigned HasInit : 1; /// The location of the "if". SourceLocation IfLoc; }; class SwitchStmtBitfields { friend class SwitchStmt; unsigned : NumStmtBits; /// True if the SwitchStmt has storage for an init statement. unsigned HasInit : 1; /// True if the SwitchStmt has storage for a condition variable. unsigned HasVar : 1; /// If the SwitchStmt is a switch on an enum value, records whether all /// the enum values were covered by CaseStmts. The coverage information /// value is meant to be a hint for possible clients. unsigned AllEnumCasesCovered : 1; /// The location of the "switch". SourceLocation SwitchLoc; }; class WhileStmtBitfields { friend class ASTStmtReader; friend class WhileStmt; unsigned : NumStmtBits; /// True if the WhileStmt has storage for a condition variable. unsigned HasVar : 1; /// The location of the "while". SourceLocation WhileLoc; }; class DoStmtBitfields { friend class DoStmt; unsigned : NumStmtBits; /// The location of the "do". SourceLocation DoLoc; }; class ForStmtBitfields { friend class ForStmt; unsigned : NumStmtBits; /// The location of the "for". 
    SourceLocation ForLoc;
  };

  // Bit-field layout classes for the jump statements (goto, continue,
  // break, return) and for switch cases.  Each starts with an anonymous
  // bit-field NumStmtBits wide so its payload does not overlap the bits
  // used by StmtBitfields when placed in the union below.
  class GotoStmtBitfields {
    friend class GotoStmt;
    friend class IndirectGotoStmt;

    unsigned : NumStmtBits;

    /// The location of the "goto".
    SourceLocation GotoLoc;
  };

  class ContinueStmtBitfields {
    friend class ContinueStmt;

    unsigned : NumStmtBits;

    /// The location of the "continue".
    SourceLocation ContinueLoc;
  };

  class BreakStmtBitfields {
    friend class BreakStmt;

    unsigned : NumStmtBits;

    /// The location of the "break".
    SourceLocation BreakLoc;
  };

  class ReturnStmtBitfields {
    friend class ReturnStmt;

    unsigned : NumStmtBits;

    /// True if this ReturnStmt has storage for an NRVO candidate.
    unsigned HasNRVOCandidate : 1;

    /// The location of the "return".
    SourceLocation RetLoc;
  };

  class SwitchCaseBitfields {
    friend class SwitchCase;
    friend class CaseStmt;

    unsigned : NumStmtBits;

    /// Used by CaseStmt to store whether it is a case statement
    /// of the form case LHS ... RHS (a GNU extension).
    unsigned CaseStmtIsGNURange : 1;

    /// The location of the "case" or "default" keyword.
SourceLocation KeywordLoc; }; //===--- Expression bitfields classes ---===// class ExprBitfields { friend class ASTStmtReader; // deserialization friend class AtomicExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class CallExpr; // ctor friend class CXXConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class CXXNewExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class DeclRefExpr; // computeDependence friend class DependentScopeDeclRefExpr; // ctor friend class DesignatedInitExpr; // ctor friend class Expr; friend class InitListExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ObjCMessageExpr; // ctor friend class OffsetOfExpr; // ctor friend class OpaqueValueExpr; // ctor friend class OverloadExpr; // ctor friend class ParenListExpr; // ctor friend class PseudoObjectExpr; // ctor friend class ShuffleVectorExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 3; unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>; }; enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> }; class ConstantExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class ConstantExpr; unsigned : NumExprBits; /// The kind of result that is tail-allocated. unsigned ResultKind : 2; /// The kind of Result as defined by APValue::Kind. unsigned APValueKind : 4; /// When ResultKind == RSK_Int64, true if the tail-allocated integer is /// unsigned. unsigned IsUnsigned : 1; /// When ResultKind == RSK_Int64. the BitWidth of the tail-allocated /// integer. 7 bits because it is the minimal number of bits to represent a /// value from 0 to 64 (the size of the tail-allocated integer). unsigned BitWidth : 7; /// When ResultKind == RSK_APValue, true if the ASTContext will cleanup the /// tail-allocated APValue. 
unsigned HasCleanup : 1; /// True if this ConstantExpr was created for immediate invocation. unsigned IsImmediateInvocation : 1; }; class PredefinedExprBitfields { friend class ASTStmtReader; friend class PredefinedExpr; unsigned : NumExprBits; /// The kind of this PredefinedExpr. One of the enumeration values /// in PredefinedExpr::IdentKind. unsigned Kind : 4; /// True if this PredefinedExpr has a trailing "StringLiteral *" /// for the predefined identifier. unsigned HasFunctionName : 1; /// The location of this PredefinedExpr. SourceLocation Loc; }; class DeclRefExprBitfields { friend class ASTStmtReader; // deserialization friend class DeclRefExpr; unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingVariableOrCapture : 1; unsigned NonOdrUseReason : 2; /// The location of the declaration name itself. SourceLocation Loc; }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class StringLiteralBitfields { friend class ASTStmtReader; friend class StringLiteral; unsigned : NumExprBits; /// The kind of this string literal. /// One of the enumeration values of StringLiteral::StringKind. unsigned Kind : 3; /// The width of a single character in bytes. Only values of 1, 2, /// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps /// the target + string kind to the appropriate CharByteWidth. unsigned CharByteWidth : 3; unsigned IsPascal : 1; /// The number of concatenated token this string is made of. /// This is the number of trailing SourceLocation. 
unsigned NumConcatenated; }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 3; }; class UnaryOperatorBitfields { friend class UnaryOperator; unsigned : NumExprBits; unsigned Opc : 5; unsigned CanOverflow : 1; // /// This is only meaningful for operations on floating point /// types when additional values need to be in trailing storage. /// It is 0 otherwise. unsigned HasFPFeatures : 1; SourceLocation Loc; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 3; unsigned IsType : 1; // true if operand is a type, false if an expression. }; class ArrayOrMatrixSubscriptExprBitfields { friend class ArraySubscriptExpr; friend class MatrixSubscriptExpr; unsigned : NumExprBits; SourceLocation RBracketLoc; }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; /// True if the callee of the call expression was found using ADL. unsigned UsesADL : 1; /// Padding used to align OffsetToTrailingObjects to a byte multiple. unsigned : 24 - 2 - NumExprBits; /// The offset in bytes from the this pointer to the start of the /// trailing objects belonging to CallExpr. Intentionally byte sized /// for faster access. unsigned OffsetToTrailingObjects : 8; }; enum { NumCallExprBits = 32 }; class MemberExprBitfields { friend class ASTStmtReader; friend class MemberExpr; unsigned : NumExprBits; /// IsArrow - True if this is "X->F", false if this is "X.F". unsigned IsArrow : 1; /// True if this member expression used a nested-name-specifier to /// refer to the member, e.g., "x->Base::f", or found its member via /// a using declaration. When true, a MemberExprNameQualifier /// structure is allocated immediately after the MemberExpr. 
unsigned HasQualifierOrFoundDecl : 1; /// True if this member expression specified a template keyword /// and/or a template argument list explicitly, e.g., x->f<int>, /// x->template f, x->template f<int>. /// When true, an ASTTemplateKWAndArgsInfo structure and its /// TemplateArguments (if any) are present. unsigned HasTemplateKWAndArgsInfo : 1; /// True if this member expression refers to a method that /// was resolved from an overloaded set having size greater than 1. unsigned HadMultipleCandidates : 1; /// Value of type NonOdrUseReason indicating why this MemberExpr does /// not constitute an odr-use of the named declaration. Meaningful only /// when naming a static member. unsigned NonOdrUseReason : 2; /// This is the location of the -> or . in the expression. SourceLocation OperatorLoc; }; class CastExprBitfields { friend class CastExpr; friend class ImplicitCastExpr; unsigned : NumExprBits; unsigned Kind : 6; unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr. /// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough /// here. ([implimits] Direct and indirect base classes [16384]). unsigned BasePathSize; }; class BinaryOperatorBitfields { friend class BinaryOperator; unsigned : NumExprBits; unsigned Opc : 6; /// This is only meaningful for operations on floating point /// types when additional values need to be in trailing storage. /// It is 0 otherwise. unsigned HasFPFeatures : 1; SourceLocation OpLoc; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. This is a temporary marker used by CodeGen. unsigned HadArrayRangeDesignator : 1; }; class ParenListExprBitfields { friend class ASTStmtReader; friend class ParenListExpr; unsigned : NumExprBits; /// The number of expressions in the paren list. 
unsigned NumExprs; }; class GenericSelectionExprBitfields { friend class ASTStmtReader; friend class GenericSelectionExpr; unsigned : NumExprBits; /// The location of the "_Generic". SourceLocation GenericLoc; }; class PseudoObjectExprBitfields { friend class ASTStmtReader; // deserialization friend class PseudoObjectExpr; unsigned : NumExprBits; // These don't need to be particularly wide, because they're // strictly limited by the forms of expressions we permit. unsigned NumSubExprs : 8; unsigned ResultIndex : 32 - 8 - NumExprBits; }; class SourceLocExprBitfields { friend class ASTStmtReader; friend class SourceLocExpr; unsigned : NumExprBits; /// The kind of source location builtin represented by the SourceLocExpr. /// Ex. __builtin_LINE, __builtin_FUNCTION, ect. unsigned Kind : 2; }; class StmtExprBitfields { friend class ASTStmtReader; friend class StmtExpr; unsigned : NumExprBits; /// The number of levels of template parameters enclosing this statement /// expression. Used to determine if a statement expression remains /// dependent after instantiation. unsigned TemplateDepth; }; //===--- C++ Expression bitfields classes ---===// class CXXOperatorCallExprBitfields { friend class ASTStmtReader; friend class CXXOperatorCallExpr; unsigned : NumCallExprBits; /// The kind of this overloaded operator. One of the enumerator /// value of OverloadedOperatorKind. unsigned OperatorKind : 6; // Only meaningful for floating point types. unsigned FPFeatures : 14; }; class CXXRewrittenBinaryOperatorBitfields { friend class ASTStmtReader; friend class CXXRewrittenBinaryOperator; unsigned : NumCallExprBits; unsigned IsReversed : 1; }; class CXXBoolLiteralExprBitfields { friend class CXXBoolLiteralExpr; unsigned : NumExprBits; /// The value of the boolean literal. unsigned Value : 1; /// The location of the boolean literal. 
SourceLocation Loc; }; class CXXNullPtrLiteralExprBitfields { friend class CXXNullPtrLiteralExpr; unsigned : NumExprBits; /// The location of the null pointer literal. SourceLocation Loc; }; class CXXThisExprBitfields { friend class CXXThisExpr; unsigned : NumExprBits; /// Whether this is an implicit "this". unsigned IsImplicit : 1; /// The location of the "this". SourceLocation Loc; }; class CXXThrowExprBitfields { friend class ASTStmtReader; friend class CXXThrowExpr; unsigned : NumExprBits; /// Whether the thrown variable (if any) is in scope. unsigned IsThrownVariableInScope : 1; /// The location of the "throw". SourceLocation ThrowLoc; }; class CXXDefaultArgExprBitfields { friend class ASTStmtReader; friend class CXXDefaultArgExpr; unsigned : NumExprBits; /// The location where the default argument expression was used. SourceLocation Loc; }; class CXXDefaultInitExprBitfields { friend class ASTStmtReader; friend class CXXDefaultInitExpr; unsigned : NumExprBits; /// The location where the default initializer expression was used. SourceLocation Loc; }; class CXXScalarValueInitExprBitfields { friend class ASTStmtReader; friend class CXXScalarValueInitExpr; unsigned : NumExprBits; SourceLocation RParenLoc; }; class CXXNewExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class CXXNewExpr; unsigned : NumExprBits; /// Was the usage ::new, i.e. is the global new to be used? unsigned IsGlobalNew : 1; /// Do we allocate an array? If so, the first trailing "Stmt *" is the /// size expression. unsigned IsArray : 1; /// Should the alignment be passed to the allocation function? unsigned ShouldPassAlignment : 1; /// If this is an array allocation, does the usual deallocation /// function for the allocated type want to know the allocated size? unsigned UsualArrayDeleteWantsSize : 1; /// What kind of initializer do we have? Could be none, parens, or braces. 
/// In storage, we distinguish between "none, and no initializer expr", and /// "none, but an implicit initializer expr". unsigned StoredInitializationStyle : 2; /// True if the allocated type was expressed as a parenthesized type-id. unsigned IsParenTypeId : 1; /// The number of placement new arguments. unsigned NumPlacementArgs; }; class CXXDeleteExprBitfields { friend class ASTStmtReader; friend class CXXDeleteExpr; unsigned : NumExprBits; /// Is this a forced global delete, i.e. "::delete"? unsigned GlobalDelete : 1; /// Is this the array form of delete, i.e. "delete[]"? unsigned ArrayForm : 1; /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is /// applied to pointer-to-array type (ArrayFormAsWritten will be false /// while ArrayForm will be true). unsigned ArrayFormAsWritten : 1; /// Does the usual deallocation function for the element type require /// a size_t argument? unsigned UsualArrayDeleteWantsSize : 1; /// Location of the expression. SourceLocation Loc; }; class TypeTraitExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class TypeTraitExpr; unsigned : NumExprBits; /// The kind of type trait, which is a value of a TypeTrait enumerator. unsigned Kind : 8; /// If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// The number of arguments to this type trait. According to [implimits] /// 8 bits would be enough, but we require (and test for) at least 16 bits /// to mirror FunctionType. unsigned NumArgs; }; class DependentScopeDeclRefExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class DependentScopeDeclRefExpr; unsigned : NumExprBits; /// Whether the name includes info for explicit template /// keyword and arguments. 
unsigned HasTemplateKWAndArgsInfo : 1; }; class CXXConstructExprBitfields { friend class ASTStmtReader; friend class CXXConstructExpr; unsigned : NumExprBits; unsigned Elidable : 1; unsigned HadMultipleCandidates : 1; unsigned ListInitialization : 1; unsigned StdInitListInitialization : 1; unsigned ZeroInitialization : 1; unsigned ConstructionKind : 3; SourceLocation Loc; }; class ExprWithCleanupsBitfields { friend class ASTStmtReader; // deserialization friend class ExprWithCleanups; unsigned : NumExprBits; // When false, it must not have side effects. unsigned CleanupsHaveSideEffects : 1; unsigned NumObjects : 32 - 1 - NumExprBits; }; class CXXUnresolvedConstructExprBitfields { friend class ASTStmtReader; friend class CXXUnresolvedConstructExpr; unsigned : NumExprBits; /// The number of arguments used to construct the type. unsigned NumArgs; }; class CXXDependentScopeMemberExprBitfields { friend class ASTStmtReader; friend class CXXDependentScopeMemberExpr; unsigned : NumExprBits; /// Whether this member expression used the '->' operator or /// the '.' operator. unsigned IsArrow : 1; /// Whether this member expression has info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; /// See getFirstQualifierFoundInScope() and the comment listing /// the trailing objects. unsigned HasFirstQualifierFoundInScope : 1; /// The location of the '->' or '.' operator. SourceLocation OperatorLoc; }; class OverloadExprBitfields { friend class ASTStmtReader; friend class OverloadExpr; unsigned : NumExprBits; /// Whether the name includes info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; /// Padding used by the derived classes to store various bits. If you /// need to add some data here, shrink this padding and add your data /// above. NumOverloadExprBits also needs to be updated. unsigned : 32 - NumExprBits - 1; /// The number of results. 
unsigned NumResults; }; enum { NumOverloadExprBits = NumExprBits + 1 }; class UnresolvedLookupExprBitfields { friend class ASTStmtReader; friend class UnresolvedLookupExpr; unsigned : NumOverloadExprBits; /// True if these lookup results should be extended by /// argument-dependent lookup if this is the operand of a function call. unsigned RequiresADL : 1; /// True if these lookup results are overloaded. This is pretty trivially /// rederivable if we urgently need to kill this field. unsigned Overloaded : 1; }; static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4, "UnresolvedLookupExprBitfields must be <= than 4 bytes to" "avoid trashing OverloadExprBitfields::NumResults!"); class UnresolvedMemberExprBitfields { friend class ASTStmtReader; friend class UnresolvedMemberExpr; unsigned : NumOverloadExprBits; /// Whether this member expression used the '->' operator or /// the '.' operator. unsigned IsArrow : 1; /// Whether the lookup results contain an unresolved using declaration. unsigned HasUnresolvedUsing : 1; }; static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4, "UnresolvedMemberExprBitfields must be <= than 4 bytes to" "avoid trashing OverloadExprBitfields::NumResults!"); class CXXNoexceptExprBitfields { friend class ASTStmtReader; friend class CXXNoexceptExpr; unsigned : NumExprBits; unsigned Value : 1; }; class SubstNonTypeTemplateParmExprBitfields { friend class ASTStmtReader; friend class SubstNonTypeTemplateParmExpr; unsigned : NumExprBits; /// The location of the non-type template parameter reference. SourceLocation NameLoc; }; class LambdaExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class LambdaExpr; unsigned : NumExprBits; /// The default capture kind, which is a value of type /// LambdaCaptureDefault. unsigned CaptureDefault : 2; /// Whether this lambda had an explicit parameter list vs. an /// implicit (and empty) parameter list. 
unsigned ExplicitParams : 1; /// Whether this lambda had the result type explicitly specified. unsigned ExplicitResultType : 1; /// The number of captures. unsigned NumCaptures : 16; }; class RequiresExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class RequiresExpr; unsigned : NumExprBits; unsigned IsSatisfied : 1; SourceLocation RequiresKWLoc; }; //===--- C++ Coroutines TS bitfields classes ---===// class CoawaitExprBitfields { friend class CoawaitExpr; unsigned : NumExprBits; unsigned IsImplicit : 1; }; //===--- Obj-C Expression bitfields classes ---===// class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; //===--- Clang Extensions bitfields classes ---===// class OpaqueValueExprBitfields { friend class ASTStmtReader; friend class OpaqueValueExpr; unsigned : NumExprBits; /// The OVE is a unique semantic reference to its source expression if this /// bit is set to true. unsigned IsUnique : 1; SourceLocation Loc; }; union { // Same order as in StmtNodes.td. 
// Statements StmtBitfields StmtBits; NullStmtBitfields NullStmtBits; CompoundStmtBitfields CompoundStmtBits; PUBLIC_DOMAIN_TECHNOLOGYStmtBitfields PUBLIC_DOMAIN_TECHNOLOGYStmtBits; LabelStmtBitfields LabelStmtBits; AttributedStmtBitfields AttributedStmtBits; IfStmtBitfields IfStmtBits; SwitchStmtBitfields SwitchStmtBits; WhileStmtBitfields WhileStmtBits; DoStmtBitfields DoStmtBits; ForStmtBitfields ForStmtBits; GotoStmtBitfields GotoStmtBits; ContinueStmtBitfields ContinueStmtBits; BreakStmtBitfields BreakStmtBits; ReturnStmtBitfields ReturnStmtBits; SwitchCaseBitfields SwitchCaseBits; // Expressions ExprBitfields ExprBits; ConstantExprBitfields ConstantExprBits; PredefinedExprBitfields PredefinedExprBits; DeclRefExprBitfields DeclRefExprBits; FloatingLiteralBitfields FloatingLiteralBits; StringLiteralBitfields StringLiteralBits; CharacterLiteralBitfields CharacterLiteralBits; UnaryOperatorBitfields UnaryOperatorBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; ArrayOrMatrixSubscriptExprBitfields ArrayOrMatrixSubscriptExprBits; CallExprBitfields CallExprBits; MemberExprBitfields MemberExprBits; CastExprBitfields CastExprBits; BinaryOperatorBitfields BinaryOperatorBits; InitListExprBitfields InitListExprBits; ParenListExprBitfields ParenListExprBits; GenericSelectionExprBitfields GenericSelectionExprBits; PseudoObjectExprBitfields PseudoObjectExprBits; SourceLocExprBitfields SourceLocExprBits; // GNU Extensions. 
StmtExprBitfields StmtExprBits; // C++ Expressions CXXOperatorCallExprBitfields CXXOperatorCallExprBits; CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits; CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits; CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits; CXXThisExprBitfields CXXThisExprBits; CXXThrowExprBitfields CXXThrowExprBits; CXXDefaultArgExprBitfields CXXDefaultArgExprBits; CXXDefaultInitExprBitfields CXXDefaultInitExprBits; CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits; CXXNewExprBitfields CXXNewExprBits; CXXDeleteExprBitfields CXXDeleteExprBits; TypeTraitExprBitfields TypeTraitExprBits; DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits; CXXConstructExprBitfields CXXConstructExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits; CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits; OverloadExprBitfields OverloadExprBits; UnresolvedLookupExprBitfields UnresolvedLookupExprBits; UnresolvedMemberExprBitfields UnresolvedMemberExprBits; CXXNoexceptExprBitfields CXXNoexceptExprBits; SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits; LambdaExprBitfields LambdaExprBits; RequiresExprBitfields RequiresExprBits; // C++ Coroutines TS expressions CoawaitExprBitfields CoawaitBits; // Obj-C Expressions ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; // Clang Extensions OpaqueValueExprBitfields OpaqueValueExprBits; }; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. 
void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void *operator new(size_t bytes, void *mem) noexcept { return mem; } void operator delete(void *, const ASTContext &, unsigned) noexcept {} void operator delete(void *, const ASTContext *, unsigned) noexcept {} void operator delete(void *, size_t) noexcept {} void operator delete(void *, void *) noexcept {} public: /// A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell {}; protected: /// Iterator for iterating over Stmt * arrays that contain only T *. /// /// This is needed because AST nodes use Stmt* arrays to store /// references to children (to be compatible with StmtIterator). template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *> struct CastIterator : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *, std::random_access_iterator_tag, TPtr> { using Base = typename CastIterator::iterator_adaptor_base; CastIterator() : Base(nullptr) {} CastIterator(StmtPtr *I) : Base(I) {} typename Base::value_type operator*() const { return cast_or_null<T>(*this->I); } }; /// Const iterator for iterating over Stmt * arrays that contain only T *. template <typename T> using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>; using ExprIterator = CastIterator<Expr>; using ConstExprIterator = ConstCastIterator<Expr>; private: /// Whether statistic collection is enabled. static bool StatisticsEnabled; protected: /// Construct an empty statement. 
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {} public: Stmt() = delete; Stmt(const Stmt &) = delete; Stmt(Stmt &&) = delete; Stmt &operator=(const Stmt &) = delete; Stmt &operator=(Stmt &&) = delete; Stmt(StmtClass SC) { static_assert(sizeof(*this) <= 8, "changing bitfields changed sizeof(Stmt)"); static_assert(sizeof(*this) % alignof(void *) == 0, "Insufficient alignment!"); StmtBits.sClass = SC; if (StatisticsEnabled) Stmt::addStmtClass(SC); } StmtClass getStmtClass() const { return static_cast<StmtClass>(StmtBits.sClass); } const char *getStmtClassName() const; /// SourceLocation tokens are not useful in isolation - they are low level /// value objects created/interpreted by SourceManager. We assume AST /// clients will have a pointer to the respective SourceManager. SourceRange getSourceRange() const LLVM_READONLY; SourceLocation getBeginLoc() const LLVM_READONLY; SourceLocation getEndLoc() const LLVM_READONLY; // global temp stats (until we have a per-module visitor) static void addStmtClass(const StmtClass s); static void EnableStatistics(); static void PrintStats(); /// Dumps the specified AST fragment and all subtrees to /// \c llvm::errs(). void dump() const; void dump(SourceManager &SM) const; void dump(raw_ostream &OS, SourceManager &SM) const; void dump(raw_ostream &OS) const; /// \return Unique reproducible object identifier int64_t getID(const ASTContext &Context) const; /// dumpColor - same as dump(), but forces color highlighting. void dumpColor() const; /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST /// back to its original source language syntax. void dumpPretty(const ASTContext &Context) const; void printPretty(raw_ostream &OS, PrinterHelper *Helper, const PrintingPolicy &Policy, unsigned Indentation = 0, StringRef NewlineSymbol = "\n", const ASTContext *Context = nullptr) const; /// Pretty-prints in JSON format. 
void printJson(raw_ostream &Out, PrinterHelper *Helper, const PrintingPolicy &Policy, bool AddQuotes) const; /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only /// works on systems with GraphViz (Mac OS X) or dot+gv installed. void viewAST() const; /// Skip no-op (attributed, compound) container stmts and skip captured /// stmt at the top, if \a IgnoreCaptured is true. Stmt *IgnoreContainers(bool IgnoreCaptured = false); const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const { return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured); } const Stmt *stripLabelLikeStatements() const; Stmt *stripLabelLikeStatements() { return const_cast<Stmt*>( const_cast<const Stmt*>(this)->stripLabelLikeStatements()); } /// Child Iterators: All subclasses must implement 'children' /// to permit easy iteration over the substatements/subexpessions of an /// AST node. This permits easy iteration over all nodes in the AST. using child_iterator = StmtIterator; using const_child_iterator = ConstStmtIterator; using child_range = llvm::iterator_range<child_iterator>; using const_child_range = llvm::iterator_range<const_child_iterator>; child_range children(); const_child_range children() const { auto Children = const_cast<Stmt *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_iterator child_begin() { return children().begin(); } child_iterator child_end() { return children().end(); } const_child_iterator child_begin() const { return children().begin(); } const_child_iterator child_end() const { return children().end(); } /// Produce a unique representation of the given statement. /// /// \param ID once the profiling operation is complete, will contain /// the unique representation of the given statement. 
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;

  /// Calculate a unique representation for a statement that is
  /// stable across compiler invocations.
  ///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  /// have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  // The declarations carried by this statement; may refer to a single Decl
  // or to a group (e.g. "int a, b;").
  DeclGroupRef DG;
  // Locations of the first token and of the terminating ";".
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  // children() exposes the declarations' initializers as Stmt* via
  // DeclGroupRef's iterators.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  const_child_range children() const {
    auto Children = const_cast<DeclStmt *>(this)->children();
    return const_child_range(Children);
  }

  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }

  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }

  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }

  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};

/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass) {
    NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
    setSemiLoc(L);
  }

  /// Build an empty null statement.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  // The ";" location is kept in the shared NullStmtBits bitfield storage.
  SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
  void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }

  bool hasLeadingEmptyMacro() const {
    return NullStmtBits.HasLeadingEmptyMacro;
  }

  // A null statement spans exactly one token: the semicolon.
  SourceLocation getBeginLoc() const { return getSemiLoc(); }
  SourceLocation getEndLoc() const { return getSemiLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  // A null statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final
    : public Stmt,
      private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
  SourceLocation RBraceLoc;

  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
  explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
                              SourceLocation LB, SourceLocation RB);

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
    CompoundStmtBits.LBraceLoc = Loc;
  }

  // Build an empty compound statement.
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  // The sub-statements live in trailing storage allocated immediately after
  // this object (llvm::TrailingObjects), not in a separately owned array.
  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  // Get the Stmt that StmtExpr would consider to be the result of this
  // compound statement. This is used by StmtExpr to properly emulate the GCC
  // compound expression extension, which ignores trailing NullStmts when
  // getting the result of the expression.
  // i.e. ({ 5;;; })
  //        ^^ ignored
  // If we don't find something that isn't a NullStmt, just return the last
  // Stmt.
  Stmt *getStmtExprResult() {
    for (auto *B : llvm::reverse(body())) {
      if (!isa<NullStmt>(B))
        return B;
    }
    return body_back();
  }

  const Stmt *getStmtExprResult() const {
    return const_cast<CompoundStmt *>(this)->getStmtExprResult();
  }

  SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};

/// PUBLIC_DOMAIN_TECHNOLOGYStmt - This represents a group of statements like { stmt stmt }.
// NOTE(review): the doc comment above was copied from CompoundStmt; this
// variant additionally carries a per-statement CheckStmt flag (see below).
class PUBLIC_DOMAIN_TECHNOLOGYStmt final
    : public Stmt,
      private llvm::TrailingObjects<PUBLIC_DOMAIN_TECHNOLOGYStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  // Marks this compound statement as a "check" statement; manipulated via
  // the CheckStmt accessors in the public interface.
  bool CheckStmt;

  /// The location of the closing "}". LBraceLoc is stored in PUBLIC_DOMAIN_TECHNOLOGYStmtBits.
  SourceLocation RBraceLoc;

  PUBLIC_DOMAIN_TECHNOLOGYStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);

  explicit PUBLIC_DOMAIN_TECHNOLOGYStmt(EmptyShell Empty)
      : Stmt(PUBLIC_DOMAIN_TECHNOLOGYStmtClass, Empty) {
    CheckStmt = false;
  }

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static PUBLIC_DOMAIN_TECHNOLOGYStmt *Create(const ASTContext &C,
                                              ArrayRef<Stmt *> Stmts,
                                              SourceLocation LB,
                                              SourceLocation RB);

  // Build an empty compound statement with a location.
explicit PUBLIC_DOMAIN_TECHNOLOGYStmt(SourceLocation Loc) : Stmt(PUBLIC_DOMAIN_TECHNOLOGYStmtClass), RBraceLoc(Loc) { PUBLIC_DOMAIN_TECHNOLOGYStmtBits.NumStmts = 0; PUBLIC_DOMAIN_TECHNOLOGYStmtBits.LBraceLoc = Loc; CheckStmt = false; } bool isCheckStmt() const { return CheckStmt; } void negateCheckStmt() { CheckStmt = false; } void setCheckStmt() { CheckStmt = true; } // Build an empty compound statement. static PUBLIC_DOMAIN_TECHNOLOGYStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts); bool body_empty() const { return PUBLIC_DOMAIN_TECHNOLOGYStmtBits.NumStmts == 0; } unsigned size() const { return PUBLIC_DOMAIN_TECHNOLOGYStmtBits.NumStmts; } using body_iterator = Stmt **; using body_range = llvm::iterator_range<body_iterator>; body_range body() { return body_range(body_begin(), body_end()); } body_iterator body_begin() { return getTrailingObjects<Stmt *>(); } body_iterator body_end() { return body_begin() + size(); } Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; } Stmt *body_back() { return !body_empty() ? body_begin()[size() - 1] : nullptr; } using const_body_iterator = Stmt *const *; using body_const_range = llvm::iterator_range<const_body_iterator>; body_const_range body() const { return body_const_range(body_begin(), body_end()); } const_body_iterator body_begin() const { return getTrailingObjects<Stmt *>(); } const_body_iterator body_end() const { return body_begin() + size(); } const Stmt *body_front() const { return !body_empty() ? body_begin()[0] : nullptr; } const Stmt *body_back() const { return !body_empty() ? 
body_begin()[size() - 1] : nullptr; } using reverse_body_iterator = std::reverse_iterator<body_iterator>; reverse_body_iterator body_rbegin() { return reverse_body_iterator(body_end()); } reverse_body_iterator body_rend() { return reverse_body_iterator(body_begin()); } using const_reverse_body_iterator = std::reverse_iterator<const_body_iterator>; const_reverse_body_iterator body_rbegin() const { return const_reverse_body_iterator(body_end()); } const_reverse_body_iterator body_rend() const { return const_reverse_body_iterator(body_begin()); } // Get the Stmt that StmtExpr would consider to be the result of this // compound statement. This is used by StmtExpr to properly emulate the GCC // compound expression extension, which ignores trailing NullStmts when // getting the result of the expression. // i.e. ({ 5;;; }) // ^^ ignored // If we don't find something that isn't a NullStmt, just return the last // Stmt. Stmt *getStmtExprResult() { for (auto *B : llvm::reverse(body())) { if (!isa<NullStmt>(B)) return B; } return body_back(); } const Stmt *getStmtExprResult() const { return const_cast<PUBLIC_DOMAIN_TECHNOLOGYStmt *>(this)->getStmtExprResult(); } SourceLocation getBeginLoc() const { return PUBLIC_DOMAIN_TECHNOLOGYStmtBits.LBraceLoc; } SourceLocation getEndLoc() const { return RBraceLoc; } SourceLocation getLBracLoc() const { return PUBLIC_DOMAIN_TECHNOLOGYStmtBits.LBraceLoc; } SourceLocation getRBracLoc() const { return RBraceLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == PUBLIC_DOMAIN_TECHNOLOGYStmtClass; } // Iterators child_range children() { return child_range(body_begin(), body_end()); } const_child_range children() const { return const_child_range(body_begin(), body_end()); } }; // SwitchCase is the base class for CaseStmt and DefaultStmt, class SwitchCase : public Stmt { protected: /// The location of the ":". SourceLocation ColonLoc; // The location of the "case" or "default" keyword. Stored in SwitchCaseBits. 
  // SourceLocation KeywordLoc;

  /// A pointer to the following CaseStmt or DefaultStmt class,
  /// used by SwitchStmt.
  SwitchCase *NextSwitchCase = nullptr;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), ColonLoc(ColonLoc) {
    setKeywordLoc(KWLoc);
  }

  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  // Defined inline after CaseStmt/DefaultStmt below, because it dispatches
  // on the dynamic type and needs both classes complete.
  inline Stmt *getSubStmt();

  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase *>(this)->getSubStmt();
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  inline SourceLocation getEndLoc() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};

/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing objects
  // at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allow ranges in cases statement of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case statement
  //   with a range. Present if and only if caseStmtIsGNURange() is true.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  // Slot offsets: the RHS slot only exists for GNU range cases, which shifts
  // the substatement slot accordingly (bool arithmetic: 0 or 1).
  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);

  /// Build an empty case statement.
  static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);

  /// True if this case statement is of the form case LHS ... RHS, which
  /// is a GNU extension. In this case the RHS can be obtained with getRHS()
  /// and the location of the ellipsis can be obtained with getEllipsisLoc().
  bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }

  SourceLocation getCaseLoc() const { return getKeywordLoc(); }
  void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }

  /// Get the location of the ... in a case statement of the form LHS ... RHS.
  SourceLocation getEllipsisLoc() const {
    return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
                                : SourceLocation();
  }

  /// Set the location of the ... in a case statement of the form LHS ... RHS.
  /// Assert that this case statement is of this form.
  void setEllipsisLoc(SourceLocation L) {
    assert(
        caseStmtIsGNURange() &&
        "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
    *getTrailingObjects<SourceLocation>() = L;
  }

  Expr *getLHS() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  const Expr *getLHS() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  void setLHS(Expr *Val) {
    getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Expr *getRHS() {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  const Expr *getRHS() const {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  void setRHS(Expr *Val) {
    assert(caseStmtIsGNURange() &&
           "setRHS but this is not a case stmt of the form LHS ... RHS!");
    getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
  const Stmt *getSubStmt() const {
    return getTrailingObjects<Stmt *>()[subStmtOffset()];
  }

  void setSubStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

class DefaultStmt : public SwitchCase {
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }
};

// Out-of-line definitions of the SwitchCase dispatch helpers declared inline
// above; CaseStmt and DefaultStmt are complete here.
SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getEndLoc();
  else if (const auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

Stmt *SwitchCase::getSubStmt() {
  if (auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getSubStmt();
  else if (auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getSubStmt();
llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!"); } /// Represents a statement that could possibly have a value and type. This /// covers expression-statements, as well as labels and attributed statements. /// /// Value statements have a special meaning when they are the last non-null /// statement in a GNU statement expression, where they determine the value /// of the statement expression. class ValueStmt : public Stmt { protected: using Stmt::Stmt; public: const Expr *getExprStmt() const; Expr *getExprStmt() { const ValueStmt *ConstThis = this; return const_cast<Expr*>(ConstThis->getExprStmt()); } static bool classof(const Stmt *T) { return T->getStmtClass() >= firstValueStmtConstant && T->getStmtClass() <= lastValueStmtConstant; } }; /// LabelStmt - Represents a label, which has a substatement. For example: /// foo: return; class LabelStmt : public ValueStmt { LabelDecl *TheDecl; Stmt *SubStmt; public: /// Build a label statement. LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt) : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) { setIdentLoc(IL); } /// Build an empty label statement. 
  explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}

  // The identifier location lives in the shared LabelStmtBits bitfields.
  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }

  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }

  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};

/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
    : public ValueStmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  Stmt *SubStmt;

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : ValueStmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  // Attributes are stored in trailing storage following this object.
  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};

/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at then end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact a "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //   Always present.
  //
  // * A "Stmt *" for the else statement.
  //   Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //   Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  // Slot offsets shift depending on which optional pieces are present
  // (bool arithmetic: each has*Storage() contributes 0 or 1).
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
         VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        Stmt *Then, SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  /// If this is an 'if constexpr', determine which substatement will be taken.
  /// Otherwise, or if the condition is value-dependent, returns None.
  Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }

  // Iterators over subexpressions. The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
enum { InitOffset = 0, BodyOffsetFromCond = 1 }; enum { NumMandatoryStmtPtr = 2 }; unsigned numTrailingObjects(OverloadToken<Stmt *>) const { return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage(); } unsigned initOffset() const { return InitOffset; } unsigned varOffset() const { return InitOffset + hasInitStorage(); } unsigned condOffset() const { return InitOffset + hasInitStorage() + hasVarStorage(); } unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; } /// Build a switch statement. SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond); /// Build a empty switch statement. explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar); public: /// Create a switch statement. static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond); /// Create an empty switch statement optionally with storage for /// an init expression and a condition variable. static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit, bool HasVar); /// True if this SwitchStmt has storage for an init statement. bool hasInitStorage() const { return SwitchStmtBits.HasInit; } /// True if this SwitchStmt has storage for a condition variable. bool hasVarStorage() const { return SwitchStmtBits.HasVar; } Expr *getCond() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } void setCond(Expr *Cond) { getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond); } Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; } const Stmt *getBody() const { return getTrailingObjects<Stmt *>()[bodyOffset()]; } void setBody(Stmt *Body) { getTrailingObjects<Stmt *>()[bodyOffset()] = Body; } Stmt *getInit() { return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } const Stmt *getInit() const { return hasInitStorage() ? 
getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } void setInit(Stmt *Init) { assert(hasInitStorage() && "This switch statement has no storage for an init statement!"); getTrailingObjects<Stmt *>()[initOffset()] = Init; } /// Retrieve the variable declared in this "switch" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// switch (int x = foo()) { /// case 0: break; /// // ... /// } /// \endcode VarDecl *getConditionVariable(); const VarDecl *getConditionVariable() const { return const_cast<SwitchStmt *>(this)->getConditionVariable(); } /// Set the condition variable in this switch statement. /// The switch statement must have storage for it. void setConditionVariable(const ASTContext &Ctx, VarDecl *VD); /// If this SwitchStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. DeclStmt *getConditionVariableDeclStmt() { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } const DeclStmt *getConditionVariableDeclStmt() const { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } SwitchCase *getSwitchCaseList() { return FirstCase; } const SwitchCase *getSwitchCaseList() const { return FirstCase; } void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; } SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; } void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; } void setBody(Stmt *S, SourceLocation SL) { setBody(S); setSwitchLoc(SL); } void addSwitchCase(SwitchCase *SC) { assert(!SC->getNextSwitchCase() && "case/default already added to a switch"); SC->setNextSwitchCase(FirstCase); FirstCase = SC; } /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a /// switch over an enum value then all cases have been explicitly covered. 
void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; } /// Returns true if the SwitchStmt is a switch of an enum value and all cases /// have been explicitly covered. bool isAllEnumCasesCovered() const { return SwitchStmtBits.AllEnumCasesCovered; } SourceLocation getBeginLoc() const { return getSwitchLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getBody() ? getBody()->getEndLoc() : reinterpret_cast<const Stmt *>(getCond())->getEndLoc(); } // Iterators child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } const_child_range children() const { return const_child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } static bool classof(const Stmt *T) { return T->getStmtClass() == SwitchStmtClass; } }; /// WhileStmt - This represents a 'while' stmt. class WhileStmt final : public Stmt, private llvm::TrailingObjects<WhileStmt, Stmt *> { friend TrailingObjects; // WhileStmt is followed by several trailing objects, // some of which optional. Note that it would be more // convenient to put the optional trailing object at the end // but this would affect children(). // The trailing objects are in order: // // * A "Stmt *" for the condition variable. // Present if and only if hasVarStorage(). This is in fact a "DeclStmt *". // // * A "Stmt *" for the condition. // Always present. This is in fact an "Expr *". // // * A "Stmt *" for the body. // Always present. // enum { VarOffset = 0, BodyOffsetFromCond = 1 }; enum { NumMandatoryStmtPtr = 2 }; unsigned varOffset() const { return VarOffset; } unsigned condOffset() const { return VarOffset + hasVarStorage(); } unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; } unsigned numTrailingObjects(OverloadToken<Stmt *>) const { return NumMandatoryStmtPtr + hasVarStorage(); } /// Build a while statement. 
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  // Indices of the sub-statements in the fixed SubExprs array.
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setCond(Cond);
    setBody(Body);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }

  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getDoLoc(); }
  // A do/while ends at the ')' that closes its condition.
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};

/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};

/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt { LabelDecl *Label; SourceLocation LabelLoc; public: GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL) : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) { setGotoLoc(GL); } /// Build an empty goto statement. explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {} LabelDecl *getLabel() const { return Label; } void setLabel(LabelDecl *D) { Label = D; } SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; } void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; } SourceLocation getLabelLoc() const { return LabelLoc; } void setLabelLoc(SourceLocation L) { LabelLoc = L; } SourceLocation getBeginLoc() const { return getGotoLoc(); } SourceLocation getEndLoc() const { return getLabelLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == GotoStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// IndirectGotoStmt - This represents an indirect goto. class IndirectGotoStmt : public Stmt { SourceLocation StarLoc; Stmt *Target; public: IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target) : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) { setTarget(target); setGotoLoc(gotoLoc); } /// Build an empty indirect goto statement. 
explicit IndirectGotoStmt(EmptyShell Empty) : Stmt(IndirectGotoStmtClass, Empty) {} void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; } SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; } void setStarLoc(SourceLocation L) { StarLoc = L; } SourceLocation getStarLoc() const { return StarLoc; } Expr *getTarget() { return reinterpret_cast<Expr *>(Target); } const Expr *getTarget() const { return reinterpret_cast<const Expr *>(Target); } void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); } /// getConstantTarget - Returns the fixed target of this indirect /// goto, if one exists. LabelDecl *getConstantTarget(); const LabelDecl *getConstantTarget() const { return const_cast<IndirectGotoStmt *>(this)->getConstantTarget(); } SourceLocation getBeginLoc() const { return getGotoLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == IndirectGotoStmtClass; } // Iterators child_range children() { return child_range(&Target, &Target + 1); } const_child_range children() const { return const_child_range(&Target, &Target + 1); } }; /// ContinueStmt - This represents a continue. class ContinueStmt : public Stmt { public: ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) { setContinueLoc(CL); } /// Build an empty continue statement. 
explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {} SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; } void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; } SourceLocation getBeginLoc() const { return getContinueLoc(); } SourceLocation getEndLoc() const { return getContinueLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ContinueStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// BreakStmt - This represents a break. class BreakStmt : public Stmt { public: BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) { setBreakLoc(BL); } /// Build an empty break statement. explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {} SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; } void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; } SourceLocation getBeginLoc() const { return getBreakLoc(); } SourceLocation getEndLoc() const { return getBreakLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == BreakStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// ReturnStmt - This represents a return, optionally of an expression: /// return; /// return 4; /// /// Note that GCC allows return with no argument in a function declared to /// return a value, and it allows returning a value in functions declared to /// return void. We explicitly model this in the AST, which means you can't /// depend on the return type of the function and the presence of an argument. 
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;

  /// The return expression.
  Stmt *RetExpr;

  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // A value-less return ends at the 'return' keyword itself.
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;

  SourceLocation AsmLoc;

  /// True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  // Operand expressions; outputs are stored first, followed by inputs (see
  // the begin_inputs()/begin_outputs() accessors).
  Stmt **Exprs = nullptr;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt(SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}

public:
  /// Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  // The base class reports invalid locations; derived classes provide the
  // real source range.
  SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
  SourceLocation getEndLoc() const LLVM_READONLY { return {}; }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand.  All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint.  Unlike
  /// output constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators.

  using inputs_iterator = ExprIterator;
  using const_inputs_iterator = ConstExprIterator;
  using inputs_range = llvm::iterator_range<inputs_iterator>;
  using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;

  // Inputs follow the outputs in Exprs, hence the NumOutputs offset.
  inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; }

  inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; }

  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const {
    return &Exprs[0] + NumOutputs;
  }

  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators.

  using outputs_iterator = ExprIterator;
  using const_outputs_iterator = ConstExprIterator;
  using outputs_range = llvm::iterator_range<outputs_iterator>;
  using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;

  outputs_iterator begin_outputs() { return &Exprs[0]; }
  outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; }
  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const { return &Exprs[0]; }
  const_outputs_iterator end_outputs() const {
    return &Exprs[0] + NumOutputs;
  }

  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};

/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation RParenLoc;
  StringLiteral *AsmStr;

  // FIXME: If we wanted to, we could allocate all of these in one big array.
  StringLiteral **Constraints = nullptr;
  StringLiteral **Clobbers = nullptr;
  IdentifierInfo **Names = nullptr;
  unsigned NumLabels = 0;

public:
  GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
             bool isvolatile, unsigned numoutputs, unsigned numinputs,
             IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
             StringLiteral *asmstr, unsigned numclobbers,
             StringLiteral **clobbers, unsigned numlabels,
             SourceLocation rparenloc);

  /// Build an empty inline-assembly statement.
  explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  //===--- Asm String Analysis ---===//

  const StringLiteral *getAsmString() const { return AsmStr; }
  StringLiteral *getAsmString() { return AsmStr; }
  void setAsmString(StringLiteral *E) { AsmStr = E; }

  /// AsmStringPiece - this is part of a decomposed asm string specification
  /// (for use with the AnalyzeAsmString function below).  An asm string is
  /// considered to be a concatenation of these parts.
  class AsmStringPiece {
  public:
    enum Kind {
      String,  // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
      Operand  // Operand reference, with optional modifier %c4.
    };

  private:
    Kind MyKind;
    std::string Str;
    unsigned OperandNo;

    // Source range for operand references.
    CharSourceRange Range;

  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
        : MyKind(Operand), Str(S), OperandNo(OpNo),
          Range(CharSourceRange::getCharRange(Begin, End)) {}

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const { return Str; }

    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present.  This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };

  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces.  If the asm string is erroneous, emit errors and return
  /// true, otherwise return false.  This handles canonicalization and
  /// translation of strings from GCC syntax to LLVM IR syntax, and handles
  /// flattening of named references like %[foo] to Operand AsmStringPiece's.
  // NOTE(review): the declared return type is unsigned, not bool, despite
  // the wording above — confirm against the definition.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }

  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();

    return {};
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // The Names/Constraints arrays store outputs first, then inputs (then
  // labels for Names); hence the NumOutputs offsets below.
  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();

    return {};
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Labels ---===//

  bool isAsmGoto() const {
    return NumLabels > 0;
  }

  unsigned getNumLabels() const {
    return NumLabels;
  }

  IdentifierInfo *getLabelIdentifier(unsigned i) const {
    return Names[i + NumOutputs + NumInputs];
  }

  AddrLabelExpr *getLabelExpr(unsigned i) const;
  StringRef getLabelName(unsigned i) const;
  using labels_iterator = CastIterator<AddrLabelExpr>;
  using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
  using labels_range = llvm::iterator_range<labels_iterator>;
  using labels_const_range = llvm::iterator_range<const_labels_iterator>;

  // Labels follow the outputs and inputs in Exprs.
  labels_iterator begin_labels() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  labels_iterator end_labels() {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_range labels() {
    return labels_range(begin_labels(), end_labels());
  }

  const_labels_iterator begin_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  const_labels_iterator end_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_const_range labels() const {
    return labels_const_range(begin_labels(), end_labels());
  }

private:
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      unsigned NumLabels,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);

public:
  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
  int getNamedOperand(StringRef SymbolicName) const;

  StringRef getClobber(unsigned i) const;

  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }

  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};

/// This represents a Microsoft inline-assembly statement extension.
/// This represents a Microsoft inline-assembly statement extension
/// (`__asm { ... }`). Unlike GCCAsmStmt, operands are not written by the
/// user: they are recovered by parsing the MS asm token stream, so the
/// constraint/clobber arrays here are StringRefs rather than StringLiterals.
class MSAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation LBraceLoc, EndLoc;
  // The full asm text, already assembled from the token stream.
  StringRef AsmStr;

  unsigned NumAsmToks = 0;

  // All arrays below are allocated in the ASTContext; this class does not
  // own them (Stmt nodes are never destroyed individually).
  Token *AsmToks = nullptr;
  StringRef *Constraints = nullptr;
  StringRef *Clobbers = nullptr;

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs,
            StringRef asmstr, ArrayRef<StringRef> clobbers,
            SourceLocation endloc);

  /// Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  /// True for the braced `__asm { ... }` form; an invalid LBraceLoc means
  /// the single-statement `__asm <instr>` form was used.
  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//

  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Inputs are stored after outputs in the shared Constraints/Exprs arrays,
  // hence the `i + NumOutputs` indexing.
  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Other ---===//

  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }

  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }

  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  // Shared by the main constructor and deserialization to copy the token,
  // constraint, expression, and clobber arrays into ASTContext storage.
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);

public:
  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};

/// Represents a `__except` handler of a SEH `__try` statement: the filter
/// expression plus the handler block.
class SEHExceptStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  // Children[FILTER_EXPR] is the filter Expr, Children[BLOCK] the handler.
  Stmt *Children[2];

  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}

public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }

  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};

/// Represents a `__finally` block of a SEH `__try` statement.
class SEHFinallyStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  Stmt *Block;

  SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}

public:
  static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getEndLoc(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block,&Block+1);
  }

  const_child_range children() const {
    return const_child_range(&Block, &Block + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};

/// Represents a SEH `__try` statement (or a C++ `try` lowered onto this
/// node); the handler child is either a SEHExceptStmt or a SEHFinallyStmt.
class SEHTryStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  bool IsCXXTry;
  SourceLocation TryLoc;
  Stmt *Children[2];

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler);

  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getBeginLoc() const LLVM_READONLY {
    return getTryLoc();
  }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt  *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};

/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;

public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators: __leave has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
  enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_ByCopy,
    VCK_VLAType,
  };

  /// Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
  class Capture {
    // Packs the captured VarDecl (null for 'this'/VLA captures) with the
    // 2-bit VariableCaptureKind.
    llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
    SourceLocation Loc;

  public:
    friend class ASTStmtReader;

    /// Create a new capture.
    ///
    /// \param Loc The source location associated with this capture.
    ///
    /// \param Kind The kind of capture (this, ByRef, ...).
    ///
    /// \param Var The variable being captured, or null if capturing this.
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr);

    /// Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const;

    /// Retrieve the source location at which the variable or 'this' was
    /// first used.
    SourceLocation getLocation() const { return Loc; }

    /// Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }

    /// Determine whether this capture handles a variable (by reference).
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }

    /// Determine whether this capture handles a variable by copy.
    bool capturesVariableByCopy() const {
      return getCaptureKind() == VCK_ByCopy;
    }

    /// Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }

    /// Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable.
    VarDecl *getCapturedVar() const;
  };

private:
  /// The number of variable captured, including 'this'.
  unsigned NumCaptures;

  /// The pointer part is the implicit the outlined function and the
  /// int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;

  /// The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl = nullptr;

  /// Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);

  /// Construct an empty captured statement.
  CapturedStmt(EmptyShell Empty, unsigned NumCaptures);

  // Trailing storage: NumCaptures capture-init Exprs followed by the
  // captured Stmt itself live directly after this object.
  Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }

  Stmt *const *getStoredStmts() const {
    return reinterpret_cast<Stmt *const *>(this + 1);
  }

  Capture *getStoredCaptures() const;

  void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }

public:
  friend class ASTStmtReader;

  static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
                              CapturedRegionKind Kind,
                              ArrayRef<Capture> Captures,
                              ArrayRef<Expr *> CaptureInits,
                              CapturedDecl *CD, RecordDecl *RD);

  static CapturedStmt *CreateDeserialized(const ASTContext &Context,
                                          unsigned NumCaptures);

  /// Retrieve the statement being captured.
  Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
  const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }

  /// Retrieve the outlined function declaration.
  CapturedDecl *getCapturedDecl();
  const CapturedDecl *getCapturedDecl() const;

  /// Set the outlined function declaration.
  void setCapturedDecl(CapturedDecl *D);

  /// Retrieve the captured region kind.
  CapturedRegionKind getCapturedRegionKind() const;

  /// Set the captured region kind.
  void setCapturedRegionKind(CapturedRegionKind Kind);

  /// Retrieve the record declaration for captured variables.
  const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }

  /// Set the record declaration for captured variables.
  void setCapturedRecordDecl(RecordDecl *D) {
    assert(D && "null RecordDecl");
    TheRecordDecl = D;
  }

  /// True if this variable has been captured.
  bool capturesVariable(const VarDecl *Var) const;

  /// An iterator that walks over the captures.
  using capture_iterator = Capture *;
  using const_capture_iterator = const Capture *;
  using capture_range = llvm::iterator_range<capture_iterator>;
  using capture_const_range = llvm::iterator_range<const_capture_iterator>;

  capture_range captures() {
    return capture_range(capture_begin(), capture_end());
  }
  capture_const_range captures() const {
    return capture_const_range(capture_begin(), capture_end());
  }

  /// Retrieve an iterator pointing to the first capture.
  capture_iterator capture_begin() { return getStoredCaptures(); }
  const_capture_iterator capture_begin() const { return getStoredCaptures(); }

  /// Retrieve an iterator pointing past the end of the sequence of
  /// captures.
  capture_iterator capture_end() const {
    return getStoredCaptures() + NumCaptures;
  }

  /// Retrieve the number of captures, including 'this'.
  unsigned capture_size() const { return NumCaptures; }

  /// Iterator that walks over the capture initialization arguments.
  using capture_init_iterator = Expr **;
  using capture_init_range = llvm::iterator_range<capture_init_iterator>;

  /// Const iterator that walks over the capture initialization
  /// arguments.
  using const_capture_init_iterator = Expr *const *;
  using const_capture_init_range =
      llvm::iterator_range<const_capture_init_iterator>;

  capture_init_range capture_inits() {
    return capture_init_range(capture_init_begin(), capture_init_end());
  }

  const_capture_init_range capture_inits() const {
    return const_capture_init_range(capture_init_begin(), capture_init_end());
  }

  /// Retrieve the first initialization argument.
  capture_init_iterator capture_init_begin() {
    return reinterpret_cast<Expr **>(getStoredStmts());
  }

  const_capture_init_iterator capture_init_begin() const {
    return reinterpret_cast<Expr *const *>(getStoredStmts());
  }

  /// Retrieve the iterator pointing one past the last initialization
  /// argument.
  capture_init_iterator capture_init_end() {
    return capture_init_begin() + NumCaptures;
  }

  const_capture_init_iterator capture_init_end() const {
    return capture_init_begin() + NumCaptures;
  }

  SourceLocation getBeginLoc() const LLVM_READONLY {
    return getCapturedStmt()->getBeginLoc();
  }

  SourceLocation getEndLoc() const LLVM_READONLY {
    return getCapturedStmt()->getEndLoc();
  }

  SourceRange getSourceRange() const LLVM_READONLY {
    return getCapturedStmt()->getSourceRange();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CapturedStmtClass;
  }

  child_range children();
  const_child_range children() const;
};

} // namespace clang

#endif // LLVM_CLANG_AST_STMT_H
2465a791ab986a73ea2bfecf8a7f75e065006167.c
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "omp.h"

/* Flat array descriptor used by the code generator: `data` is the raw
 * buffer; the int arrays describe the padded/domain/halo geometry. Only
 * `data` and `size` are read in this file. */
struct dataobj
{
  void *restrict data;
  int * size;
  int * npsize;
  int * dsize;
  int * hsize;
  int * hofs;
  int * oofs;
} ;

/* Per-section wall-clock accumulator filled in by the kernel below. */
struct profiler
{
  double section0;
} ;

/* Auto-generated (Devito-style) boundary-padding kernel, offloaded with
 * OpenMP `target` directives. For each of the six faces of the 3-D `phi`
 * field it copies a fixed interior plane into the `*_ltkn`/`*_rtkn`-thick
 * boundary layer. All indices carry a +2 halo offset. Returns 0 always. */
int padfunc(struct dataobj *restrict phi_vec, const int x_M, const int y_M, const int z_M, const int abc_x_l_ltkn, const int abc_x_r_rtkn, const int abc_y_l_ltkn, const int abc_y_r_rtkn, const int abc_z_l_ltkn, const int abc_z_r_rtkn, struct profiler * timers, const int x_m, const int y_m, const int z_m)
{
  /* Reinterpret the flat buffer as a VLA-typed 3-D array so plain
   * triple-subscript indexing works below. */
  float (*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[phi_vec->size[1]][phi_vec->size[2]]) phi_vec->data;

  /* Copy the whole field to the device once; all loops below run there. */
  #pragma omp target enter data map(to: phi[0:phi_vec->size[0]][0:phi_vec->size[1]][0:phi_vec->size[2]])

  struct timeval start_section0, end_section0;
  gettimeofday(&start_section0, NULL);
  /* Begin section0 */
  /* Left x boundary: replicate interior plane x=12 into the first
   * abc_x_l_ltkn planes. NOTE(review): 12 and the x_M-8 / y_M-8 / z_M-8
   * source indices below are generator-emitted constants, presumably tied
   * to the damping-layer width — confirm against the generating script. */
  for (int abc_x_l = x_m; abc_x_l <= abc_x_l_ltkn + x_m - 1; abc_x_l += 1)
  {
    #pragma omp target teams distribute parallel for collapse(2)
    for (int y = y_m; y <= y_M; y += 1)
    {
      for (int z = z_m; z <= z_M; z += 1)
      {
        phi[abc_x_l + 2][y + 2][z + 2] = phi[12][y + 2][z + 2];
      }
    }
  }
  /* Right x boundary: replicate interior plane x = x_M - 8 - 2 (unpadded)
   * into the last abc_x_r_rtkn planes. */
  for (int abc_x_r = -abc_x_r_rtkn + x_M + 1; abc_x_r <= x_M; abc_x_r += 1)
  {
    #pragma omp target teams distribute parallel for collapse(2)
    for (int y = y_m; y <= y_M; y += 1)
    {
      for (int z = z_m; z <= z_M; z += 1)
      {
        phi[abc_x_r + 2][y + 2][z + 2] = phi[x_M - 8][y + 2][z + 2];
      }
    }
  }
  /* y and z boundaries, fused into one x-parallel sweep (only the outer x
   * loop is distributed; the inner boundary loops are short). */
  #pragma omp target teams distribute parallel for collapse(1)
  for (int x = x_m; x <= x_M; x += 1)
  {
    for (int abc_y_l = y_m; abc_y_l <= abc_y_l_ltkn + y_m - 1; abc_y_l += 1)
    {
      for (int z = z_m; z <= z_M; z += 1)
      {
        phi[x + 2][abc_y_l + 2][z + 2] = phi[x + 2][12][z + 2];
      }
    }
    for (int abc_y_r = -abc_y_r_rtkn + y_M + 1; abc_y_r <= y_M; abc_y_r += 1)
    {
      for (int z = z_m; z <= z_M; z += 1)
      {
        phi[x + 2][abc_y_r + 2][z + 2] = phi[x + 2][y_M - 8][z + 2];
      }
    }
    for (int y = y_m; y <= y_M; y += 1)
    {
      for (int abc_z_l = z_m; abc_z_l <= abc_z_l_ltkn + z_m - 1; abc_z_l += 1)
      {
        phi[x + 2][y + 2][abc_z_l + 2] = phi[x + 2][y + 2][12];
      }
      for (int abc_z_r = -abc_z_r_rtkn + z_M + 1; abc_z_r <= z_M; abc_z_r += 1)
      {
        phi[x + 2][y + 2][abc_z_r + 2] = phi[x + 2][y + 2][z_M - 8];
      }
    }
  }
  /* End section0 */
  gettimeofday(&end_section0, NULL);
  /* Accumulate (not assign) elapsed seconds so repeated calls sum up. */
  timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;

  /* Copy results back to the host, then release the device mapping. */
  #pragma omp target update from(phi[0:phi_vec->size[0]][0:phi_vec->size[1]][0:phi_vec->size[2]])
  #pragma omp target exit data map(release: phi[0:phi_vec->size[0]][0:phi_vec->size[1]][0:phi_vec->size[2]])

  return 0;
}
OmpForBodyLink.c
/* Compiler/tooling test input: exercises how `#pragma omp for` bodies are
 * linked/represented. The "useless" statements below are deliberate test
 * fixtures — do not clean them up. NOTE(review): the `omp for` directives
 * are orphaned (no enclosing `omp parallel`); presumably intentional for
 * this test — confirm with the harness. */
int x;

int main()
{
  int i;
  /* Body is a bare expression statement with no effect (intentional). */
#pragma omp for
  for (i = 0; i < 10; i++) {
    123;
  }
  /* Body declares a local `x` that shadows the file-scope `x` (intentional). */
#pragma omp for
  for (i = 0; i < 10; i++) {
    int x;
  }
  /* Body mixes a declaration with a `continue` jump (intentional). */
#pragma omp for
  for (i = 0; i < 10; i++) {
    int y;
    continue;
  }
}
convolution_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convolution_fp16s(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; // kernel offsets std::vector<int> _space_ofs(maxk); int* space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } const float* bias_data_ptr = bias_data; // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = 0.f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } const __fp16* kptr = weight_data_fp16.channel(p); // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const __fp16* sptr = m.row<__fp16>(i * stride_h) + j * stride_w; for (int k = 0; k < maxk; k++) { float 
val = (float)sptr[space_ofs[k]]; float w = (float)kptr[k]; sum += val * w; } kptr += maxk; } sum = activation_ss(sum, activation_type, activation_params); outptr[j] = (__fp16)sum; } outptr += outw; } } }
pr83977-1.c
/* PR middle-end/83977 */
/* { dg-do compile } */
/* { dg-additional-options "-O2 -w" } */

/* Regression test: a SIMD-declared function taking a struct by value.
 * Shapes (uniform struct param, unused linear param) are the point of the
 * test — keep signatures exactly as written. */
struct S { int a, b, c; };

#pragma omp declare simd uniform(z) linear(v:1)
__attribute__((noinline)) static int
foo (int x, int y, struct S z, int u, int v)
{
  return x + y + z.a;
}

/* Caller forcing instantiation of foo's SIMD clones. */
int
bar (int x, int y, int z)
{
  struct S s = { z, 1, 1 };
  return foo (x, y, s, 0, 0);
}
jacobi.c
#include <stdio.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif

// Add timing support
#include <sys/time.h>

/* Wall-clock time in seconds (gettimeofday resolution). */
double time_stamp()
{
  struct timeval t;
  double time;
  gettimeofday(&t, NULL);
  time = t.tv_sec + 1.0e-6*t.tv_usec;
  return time;
}
double time1, time2;

void driver(void);
void initialize(void);
void jacobi(void);
void error_check(void);

/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah,       Kuck and Associates, Inc. (KAI), 1998
* Author:   Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This C version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input :  n - grid dimension in x direction
*          m - grid dimension in y direction
*          alpha - Helmholtz constant (always greater than 0.0)
*          tol   - error tolerance for iterative solver
*          relax - Successice over relaxation parameter
*          mits  - Maximum iterations for iterative solver
*
* On output
*       : u(n,m) - Dependent variable (solutions)
*       : f(n,m) - Right hand side function
*************************************************************/

#define MSIZE 500
/* Problem state lives in file-scope globals; all routines below share it. */
int n,m,mits;
double tol,relax=1.0,alpha=0.0543;
double u[MSIZE][MSIZE],f[MSIZE][MSIZE],uold[MSIZE][MSIZE];
double dx,dy;

int main (void)
{
  float toler;
  /* Interactive input is disabled; fixed parameters are used instead.
     printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE);
     scanf ("%d",&n);
     scanf ("%d",&m);
     printf("Input tol - error tolerance for iterative solver\n");
     scanf("%f",&toler);
     tol=(double)toler;
     printf("Input mits - Maximum iterations for solver\n");
     scanf("%d",&mits);
   */
  n=MSIZE;
  m=MSIZE;
  tol=0.0000000001;
  mits=5000;
#ifdef _OPENMP
#pragma omp parallel
  {
#pragma omp single
    printf("Running using %d threads...\n",omp_get_num_threads());
  }
#endif
  driver ( ) ;
  return 0;
}

/*************************************************************
* Subroutine driver ()
* This is where the arrays are allocated and initialized.
*
* Working variables/arrays
*     dx  - grid spacing in x direction
*     dy  - grid spacing in y direction
*************************************************************/
void driver( )
{
  initialize();

  time1 = time_stamp();
  /* Solve Helmholtz equation */
  jacobi ();
  time2 = time_stamp();
  printf("------------------------\n");
  printf("Execution time = %f\n",time2-time1);
  /* error_check (n,m,alpha,dx,dy,u,f)*/
  error_check ( );
}

/*      subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
/* NOTE(review): the #pragma aitool annotations below look like expected
 * FP-operation counts consumed by an analysis tool — the loops must stay
 * structurally as-is for those counts to match. */
void initialize( )
{
  int i,j, xx,yy;
  //double PI=3.1415926;

  dx = 2.0 / (n-1);
  dy = 2.0 / (m-1);

  /* Initialize initial condition and RHS */
#pragma aitool fp_plus(2) fp_multiply(2)
  for (i=0;i<n;i++)
    for (j=0;j<m;j++)
    {
      xx =(int)( -1.0 + dx * (i-1));
      yy = (int)(-1.0 + dy * (j-1)) ;
      u[i][j] = 0.0;
    }

  /* NOTE(review): this loop reads xx/yy without updating them, so every
   * f[i][j] uses the stale values left by the last iteration of the loop
   * above. The loop after this one recomputes xx/yy per element and
   * overwrites both u and f, so the final state is correct and this pass
   * is effectively dead work — presumably kept for the aitool counts;
   * confirm before removing. */
#pragma aitool fp_minus(6) fp_multiply(5)
  for (i=0;i<n;i++)
    for (j=0;j<m;j++)
    {
      u[i][j] = 0.0;
      f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\
        - 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy);
    }

  /* Authoritative initialization: xx/yy recomputed for each grid point. */
#pragma aitool fp_plus(2) fp_minus(6) fp_multiply(7)
  for (i=0;i<n;i++)
    for (j=0;j<m;j++)
    {
      xx =(int)( -1.0 + dx * (i-1));
      yy = (int)(-1.0 + dy * (j-1)) ;
      u[i][j] = 0.0;
      f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\
        - 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy);
    }

}

/*      subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m   Number of grid points in the X/Y directions
*         dx,dy Grid spacing in the X/Y directions
*         alpha
Helmholtz eqn. coefficient
*         omega Relaxation factor
*         f(n,m) Right hand side function
*         u(n,m) Dependent variable/Solution
*         tol    Tolerance for iterative solver
*         maxit  Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
void jacobi( )
{
  double omega;
  int i,j,k;
  double error,resid,ax,ay,b;
  // double  error_local;

  // float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2;
  // float te1,te2;
  // float second;

  omega=relax;
  /*
   * Initialize coefficients */

  ax = 1.0/(dx*dx); /* X-direction coef */
  ay = 1.0/(dy*dy); /* Y-direction coef */
  b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */

  error = 10.0 * tol;     /* force at least one sweep */
  k = 1;

  /* Iterate until converged (error <= tol) or mits sweeps done. */
  while ((k<=mits)&&(error>tol))
  {
    error = 0.0;

    /* Copy new solution into old */
    {
      for(i=0;i<n;i++)
        for(j=0;j<m;j++)
          uold[i][j] = u[i][j];

      /* 5-point Jacobi stencil; boundaries (i=0,n-1 / j=0,m-1) are fixed. */
#pragma aitool fp_plus(5) fp_minus(2) fp_multiply(5) fp_divide(1)
      for (i=1;i<(n-1);i++)
        for (j=1;j<(m-1);j++)
        {
          resid = (ax*(uold[i-1][j] + uold[i+1][j])\
              + ay*(uold[i][j-1] + uold[i][j+1])+ b * uold[i][j] - f[i][j])/b;

          u[i][j] = uold[i][j] - omega * resid;
          error = error + resid*resid ;
        }

    }
    /* omp end parallel */

    /* Error check */

    k = k + 1;
    if (k%500==0)
      printf("Finished %d iteration.\n",k);
    /* Normalized residual norm used as the convergence criterion. */
    error = sqrt(error)/(n*m);

  }          /*  End iteration loop */

  printf("Total Number of Iterations:%d\n",k);
  printf("Residual:%E\n", error);
}

/*      subroutine error_check (n,m,alpha,dx,dy,u,f)
      implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
void error_check ( )
{
  int i,j;
  double xx,yy,temp,error;

  dx = 2.0 / (n-1);
  dy = 2.0 / (m-1);
  error = 0.0 ;

  /* Compare against the assumed exact solution (1-x^2)(1-y^2); note xx/yy
   * are doubles here, unlike the int-truncated versions in initialize(). */
#pragma aitool fp_plus(3) fp_minus(3) fp_multiply(6)
  for (i=0;i<n;i++)
    for (j=0;j<m;j++)
    {
      xx = -1.0 + dx * (i-1);
      yy = -1.0 + dy * (j-1);
      temp  = u[i][j] - (1.0-xx*xx)*(1.0-yy*yy);
      error = error + temp*temp;
    }

  error = sqrt(error)/(n*m);

  printf("Solution Error :%E \n",error);
}
GB_binop__bshift_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bshift_uint64 // A.*B function (eWiseMult): GB_AemultB__bshift_uint64 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bshift_uint64 // C+=b function (dense accum): GB_Cdense_accumb__bshift_uint64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bshift_uint64 // C=scalar+B GB_bind1st__bshift_uint64 // C=scalar+B' GB_bind1st_tran__bshift_uint64 // C=A+scalar GB_bind2nd__bshift_uint64 // C=A'+scalar GB_bind2nd_tran__bshift_uint64 // C type: uint64_t // A type: uint64_t // B,b type: int8_t // BinaryOp: cij = GB_bitshift_uint64 (aij, bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 0 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t 
//------------------------------------------------------------------------------
// NOTE(review): this chunk looks like machine-generated, per-operator kernel
// code (operator: bitshift; x/z type: uint64_t; shift count y: int8_t, judging
// by the casts in GB_bind1st/GB_bind2nd below).  If so, fixes belong in the
// generator template, not in this instantiation -- TODO confirm.
//------------------------------------------------------------------------------

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

// accessor into the output value array
#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = GB_bitshift_uint64 (x, y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// ("(none)": no CBLAS acceleration applies to a bitshift operator)
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSHIFT || GxB_NO_UINT64 || GxB_NO_BSHIFT_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Compiled out for this operator: per the comment below, this kernel is only
// emitted for a fixed list of arithmetic ops, which excludes bitshift.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__bshift_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__bshift_uint64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__bshift_uint64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        // (int8_t matches the shift-count operand type used by the bind1st /
        // bind2nd kernels below)
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above always returns; harmless
    // generated-code artifact.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// Compiled out for this operator.
#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// NOTE(review): "(node)" below looks like a typo for the "(none)" placeholder
// used by the other disabled stubs; harmless, since the block is #if 0'd out.
GrB_Info (node)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Release the per-matrix ek_slice workspaces; invoked on the success path
// below and, presumably, on error paths inside the included template --
// verify against GB_add_template.c.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}

GrB_Info GB_AaddB__bshift_uint64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice workspaces consumed by GB_FREE_ALL; populated (if at all) by the
    // included template
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__bshift_uint64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__bshift_uint64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    // B holds the shift counts, hence int8_t here while Cx is uint64_t
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entry if the GBB test on the Bb vector says it is absent
        if (!GBB (Bb, p)) continue ;
        int8_t bij = Bx [p] ;
        Cx [p] = GB_bitshift_uint64 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__bshift_uint64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    // y is the fixed shift count applied to every entry
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = Ax [p] ;
        Cx [p] = GB_bitshift_uint64 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int8_t aij = Ax [pA] ; \
    Cx [pC] = GB_bitshift_uint64 (x, aij) ; \
}

GrB_Info GB_bind1st_tran__bshift_uint64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    // Retarget GB_ATYPE to the y-operand type (int8_t) for the include below;
    // it is restored to uint64_t afterwards.  The #define/#undef ordering here
    // is load-bearing -- do not reorder.
    #undef GB_ATYPE
    #define GB_ATYPE \
        int8_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    #undef GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = Ax [pA] ; \
    Cx [pC] = GB_bitshift_uint64 (aij, y) ; \
}

GrB_Info GB_bind2nd_tran__bshift_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

// presumably closes a conditional guard opened before this chunk -- cannot
// confirm from here
#endif
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" 
#include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> 
ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void 
threadSafetyCleanup(BeforeSet* Cache);
} // namespace threadSafety

// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
                  SourceLocation> UnexpandedParameterPack;

/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};

/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  /// Look up (creating if needed) the nullability record for \p file.
  /// A one-entry cache makes repeated queries for the same file cheap; the
  /// previously cached entry is written back to the map before being evicted.
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.  (DenseMap::operator[] value-constructs
    // an entry for a file not seen before, so this never fails.)
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Returns the expected type for the token at \p Tok, or a null QualType
  /// when \p Tok is not the token this builder was primed for.  An explicitly
  /// stored Type takes precedence over the lazily computed one.
  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};

/// Sema - This implements semantic analysis and AST building for C.
class Sema {
  // Sema is move/copy-forbidden: it owns parser-wide state.
  Sema(const Sema &) = delete;
  void operator=(const Sema &) = delete;

  /// Source of additional semantic information.
  ExternalSemaSource *ExternalSource;

  /// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. 
LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. 
Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. 
const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. 
This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. 
TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. 
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. 
DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. 
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. 
SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. 
ObjCMethodDecl *StringWithUTF8StringMethod;

/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;

/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;

/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;

/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;

/// id<NSCopying> type.
QualType QIDNSCopying;

/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;

/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;

/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
/// NOTE(review): comment truncated upstream — presumably relates to the
/// UnevaluatedAbstract context (MS-style inline assembly SIZE operands).
bool AllowAbstractFieldReference;

/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
  /// The current expression and its subexpressions occur within an
  /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
  /// \c sizeof, where the type of the expression may be significant but
  /// no code will be generated to evaluate the value of the expression at
  /// run time.
  Unevaluated,

  /// The current expression occurs within a braced-init-list within
  /// an unevaluated operand. This is mostly like a regular unevaluated
  /// context, except that we still instantiate constexpr functions that are
  /// referenced here so that we can perform narrowing checks correctly.
  UnevaluatedList,

  /// The current expression occurs within a discarded statement.
  /// This behaves largely similarly to an unevaluated operand in preventing
  /// definitions from being required, but not in other ways.
  DiscardedStatement,

  /// The current expression occurs within an unevaluated
  /// operand that unconditionally permits abstract references to
  /// fields, such as a SIZE operator in MS-style inline assembly.
  UnevaluatedAbstract,

  /// The current context is "potentially evaluated" in C++11 terms,
  /// but the expression is evaluated at compile-time (like the values of
  /// cases in a switch statement).
  ConstantEvaluated,

  /// The current expression is potentially evaluated at run time,
  /// which means that code may be generated to evaluate the value of the
  /// expression at run time.
  PotentiallyEvaluated,

  /// The current expression is potentially evaluated, but any
  /// declarations referenced inside that expression are only used if
  /// in fact the current expression is used.
  ///
  /// This value is used when parsing default function arguments, for which
  /// we would like to provide diagnostics (e.g., passing non-POD arguments
  /// through varargs) but do not want to mark declarations as "referenced"
  /// until the default argument is used.
  PotentiallyEvaluatedIfUsed
};

/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;

  /// Whether we are in a decltype expression.
  /// NOTE(review): not set by the constructor below — confirm callers
  /// assign it before it is read.
  bool IsDecltype;

  /// The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  MaybeODRUseExprSet SavedMaybeODRUseExprs;

  /// The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// The context information used to mangle lambda expressions
  /// and block literals within this context.
  ///
  /// This mangling information is allocated lazily, since most contexts
  /// do not have lambda expressions or block literals.
  std::unique_ptr<MangleNumberingContext> MangleNumbering;

  /// If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
      : Context(Context), ParentCleanup(ParentCleanup),
        NumCleanupObjects(NumCleanupObjects), NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
        ExprContext(ExprContext) {}

  /// Retrieve the mangling numbering context, used to consistently
  /// number constructs like lambdas for mangling.
  MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

  /// True for any of the unevaluated context kinds.
  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }

  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated;
  }
};

/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
    const DeclContext *DC, Decl *&ManglingContextDecl);

/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  // Method pointer plus 2-bit Kind packed into the pointer's low bits.
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult() : Pair() {}
  SpecialMemberOverloadResult(CXXMethodDecl *MD)
      : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};

/// FoldingSet node wrapping a SpecialMemberOverloadResult for caching.
class SpecialMemberOverloadResultEntry
    : public llvm::FastFoldingSetNode,
      public SpecialMemberOverloadResult {
public:
  SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID) {}
};

/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;

llvm::BumpPtrAllocator BumpAlloc;

/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;

typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
    UnparsedDefaultArgInstantiationsMap;

/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. 
enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. 
class FPContractStateRAII { public: FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {} ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; } private: Sema& S; FPOptions OldFPFeaturesState; }; void addImplicitTypedef(StringRef Name, QualType T); public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. 
class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op // in that case anwyay. SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// Build a partial diagnostic. 
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. 
class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. 
//

QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                            const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                            const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
                          SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
                            SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                        Expr *ArraySize, unsigned Quals,
                        SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                            SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
                               SourceLocation AttrLoc);

/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
                               SourceLocation AttrLoc);

bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
                           MutableArrayRef<QualType> ParamTypes,
                           SourceLocation Loc, DeclarationName Entity,
                           const FunctionProtoType::ExtProtoInfo &EPI);

QualType BuildMemberPointerType(QualType T, QualType Class,
                                SourceLocation Loc,
                                DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
                               SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
                           SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
                            SourceLocation Loc);

TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);

/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. 
struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. 
For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. 
bool hasVisibleDeclaration(const NamedDecl *D,
                           llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  // Fast path via isVisible; the slow path can also report which modules
  // would need to be imported (via \p Modules).
  return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules);

bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                          bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
  // Convenience overload that discards the suggested declaration.
  NamedDecl *Hidden;
  return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}

/// Determine if the template parameter \p D has a visible default argument.
bool hasVisibleDefaultArgument(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                            const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
    SourceLocation Loc, const NamedDecl *D,
    ArrayRef<const NamedDecl *> Equiv);

bool isUsualDeallocationFunction(const CXXMethodDecl *FD);

/// Non-diagnosing completeness query: true iff RequireCompleteTypeImpl
/// (with no diagnoser) reports the type complete.
bool isCompleteType(SourceLocation Loc, QualType T) {
  return !RequireCompleteTypeImpl(Loc, T, nullptr);
}

// Ensure \p T is complete at \p Loc, diagnosing otherwise. The variadic
// template overload forwards extra arguments into a BoundTypeDiagnoser.
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         unsigned DiagID);

template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}

void completeExprArrayBound(Expr *E);

// As RequireCompleteType, but operates on an expression's type.
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);

template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, Diagnoser);
}

// Ensure \p T is a literal type at \p Loc, diagnosing otherwise.
bool RequireLiteralType(SourceLocation Loc, QualType T,
                        TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}

QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                           const CXXScopeSpec &SS, QualType T,
                           TagDecl *OwnedTagDecl = nullptr);

QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                           bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
                                 UnaryTransformType::UTTKind UKind,
                                 SourceLocation Loc);

//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//

/// Result of a "should we skip this body?" query: whether to skip, whether
/// the new definition must be checked against the previous one, and the
/// previous/new declarations involved.
struct SkipBodyInfo {
  SkipBodyInfo()
      : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
        New(nullptr) {}
  bool ShouldSkip;
  bool CheckSameAsPrevious;
  NamedDecl *Previous;
  NamedDecl *New;
};

DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

void DiagnoseUseOfUnimplementedSelectors();

bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

// Resolve an identifier to a type in the given scope. The many flags tune
// lookup for class names, ctor/dtor names, template deduction contexts, and
// typo correction (\p CorrectedII receives a corrected identifier).
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                       Scope *S, CXXScopeSpec *SS = nullptr,
                       bool isClassName = false, bool HasTrailingDot = false,
                       ParsedType ObjectType = nullptr,
                       bool IsCtorOrDtorName = false,
                       bool WantNontrivialTypeSourceInfo = false,
                       bool IsClassTemplateDeductionContext = true,
                       IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
                             SourceLocation IILoc,
                             Scope *S,
                             CXXScopeSpec *SS,
                             ParsedType &SuggestedType,
                             bool IsTemplateName = false);

/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
                                    SourceLocation NameLoc,
                                    bool IsTemplateTypeArg);

/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
  NC_Unknown,
  NC_Error,
  NC_Keyword,
  NC_Type,
  NC_Expression,
  NC_NestedNameSpecifier,
  NC_TypeTemplate,
  NC_VarTemplate,
  NC_FunctionTemplate,
  NC_UndeclaredTemplate,
};

/// Tagged-union result of ClassifyName(): exactly one of the payload members
/// (Expr, Template, Type) is meaningful, selected by Kind. Construct via the
/// implicit constructors or the named factory functions below.
class NameClassification {
  NameClassificationKind Kind;
  ExprResult Expr;
  TemplateName Template;
  ParsedType Type;

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

  static NameClassification Error() {
    return NameClassification(NC_Error);
  }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification NestedNameSpecifier() {
    return NameClassification(NC_NestedNameSpecifier);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification UndeclaredTemplate(TemplateName Name) {
    NameClassification Result(NC_UndeclaredTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  // Accessors assert that Kind matches the requested payload.
  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  ExprResult getExpression() const {
    assert(Kind == NC_Expression);
    return Expr;
  }

  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
    return Template;
  }

  // Map the classification kind to the corresponding TemplateNameKind;
  // only valid for the template classifications.
  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    case NC_UndeclaredTemplate:
      return TNK_Undeclared_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};

/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
                                IdentifierInfo *&Name, SourceLocation NameLoc,
                                const Token &NextToken,
                                bool IsAddressOfOperand,
                                CorrectionCandidateCallback *CCC = nullptr);

/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
  ClassTemplate,
  FunctionTemplate,
  VarTemplate,
  AliasTemplate,
  TemplateTemplateParam,
  Concept,
  DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);

/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  // Only meaningful in C++ and for valid expressions.
  if (!getLangOpts().CPlusPlus || E.isInvalid())
    return false;
  Dependent = false;
  // A plain (member) reference with no explicit template arguments could
  // plausibly have been meant as a template-name.
  if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
    return !DRE->hasExplicitTemplateArgs();
  if (auto *ME = dyn_cast<MemberExpr>(E.get()))
    return !ME->hasExplicitTemplateArgs();
  // The dependent forms are reported through \p Dependent.
  Dependent = true;
  if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
    return !DSDRE->hasExplicitTemplateArgs();
  if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
    return !DSME->hasExplicitTemplateArgs();
  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
                                        SourceLocation Less,
                                        SourceLocation Greater);

Decl *ActOnDeclarator(Scope *S, Declarator &D);

NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                            MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                  DeclarationName Name, SourceLocation Loc,
                                  bool IsTemplateId);
void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
                               SourceLocation FallbackLoc,
                               SourceLocation ConstQualLoc = SourceLocation(),
                               SourceLocation VolatileQualLoc = SourceLocation(),
                               SourceLocation RestrictQualLoc = SourceLocation(),
                               SourceLocation AtomicQualLoc = SourceLocation(),
                               SourceLocation UnalignedQualLoc = SourceLocation());

static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
                                  const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
                 const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);

/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);

void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);

private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;

public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                  TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                  TypeSourceInfo *TInfo,
                                  LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
                                LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope,
                                   ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
                             MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
                                   Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current lanugage mode (with no extensions). CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void 
ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. 
bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. 
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parsed has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. 
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. 
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl 
*CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. 
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. 
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. 
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities deteremines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. 
An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final prirority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final vaue. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added. AvailabilityAttr *mergeAvailabilityAttr( NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr 
*mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool 
MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void 
HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. 
CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. 
virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. 
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions 
= false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 
'note' the specific overload candidate void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. 
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
                                       bool Complain = false,
                                       SourceLocation Loc = SourceLocation());

// Transformation performed by ExtractUnqualifiedFunctionType:
// [PossiblyAFunctionType]  -->  [Return]
//   NonFunctionType        -->  NonFunctionType
//   R (A)                  -->  R(A)
//   R (*)(A)               -->  R (A)
//   R (&)(A)               -->  R (A)
//   R (S::*)(A)            -->  R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

/// Resolve the address of an overloaded function (e.g. `&f` where `f` names
/// an overload set) against \p TargetType, reporting the chosen declaration
/// through \p Found. Returns null on failure; presumably \p Complain controls
/// whether diagnostics are emitted -- TODO confirm against the definition.
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType,
                                   bool Complain, DeclAccessPair &Found,
                                   bool *pHadMultipleCandidates = nullptr);

/// NOTE(review): semantics inferred from the name (select the single viable
/// candidate of an overload set named by \p E) -- verify in the definition.
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
                                            DeclAccessPair &FoundResult);

bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
    ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);

FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
                                            bool Complain = false,
                                            DeclAccessPair *Found = nullptr);

// NOTE(review): parameter name 'DoFunctionPointerConverion' is misspelled
// ('Conversion'); harmless to callers, but worth a separate rename cleanup
// kept in sync with the out-of-line definition.
bool ResolveAndFixSingleFunctionTemplateSpecialization(
    ExprResult &SrcExpr, bool DoFunctionPointerConverion = false,
    bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(),
    QualType DestTypeForComplaining = QualType(),
    unsigned DiagIDForComplaining = 0);

/// Rewrite a reference to an overload set so that it refers to the selected
/// function \p Fn (found as \p FoundDecl). The second declaration is the
/// ExprResult-taking overload.
Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl,
                                     FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl,
                                          FunctionDecl *Fn);

/// Add the candidate functions for a call through \p ULE to \p CandidateSet.
/// NOTE(review): the exact candidate sources (lookup results, ADL, ...) are
/// determined by the out-of-line definition -- confirm there.
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                 ArrayRef<Expr *> Args,
                                 OverloadCandidateSet &CandidateSet,
                                 bool PartialOverloading = false);

// An enum used to represent the different possible results of building a
// range-based for loop.
/// Result of attempting to build the begin/end calls of a range-based
/// for loop.
enum ForRangeStatus {
  FRS_Success,
  FRS_NoViableFunction,
  FRS_DiagnosticIssued
};

/// Build the `begin(Range)` / `end(Range)` call for a range-based for loop,
/// using \p MemberLookup and \p CandidateSet; the built call is returned via
/// \p CallExpr. (The member-vs-free-function selection strategy lives in the
/// definition.)
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
                                         SourceLocation RangeLoc,
                                         const DeclarationNameInfo &NameInfo,
                                         LookupResult &MemberLookup,
                                         OverloadCandidateSet *CandidateSet,
                                         Expr *Range, ExprResult *CallExpr);

/// Build a call whose callee \p Fn names an overload set (\p ULE).
/// \p ExecConfig, when non-null, is an extra execution-configuration
/// argument (e.g. a kernel launch config) -- TODO confirm.
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
                                   UnresolvedLookupExpr *ULE,
                                   SourceLocation LParenLoc,
                                   MultiExprArg Args,
                                   SourceLocation RParenLoc,
                                   Expr *ExecConfig,
                                   bool AllowTypoCorrection = true,
                                   bool CalleesAddressIsTaken = false);

/// Populate \p CandidateSet for a call through \p ULE and possibly produce
/// \p Result. NOTE(review): the exact meaning of the bool return is defined
/// out of line -- confirm before relying on it.
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                            MultiExprArg Args, SourceLocation RParenLoc,
                            OverloadCandidateSet *CandidateSet,
                            ExprResult *Result);

/// Create a call to an overloaded unary operator \p Opc with candidate
/// functions \p Fns; \p RequiresADL enables argument-dependent lookup.
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
                                   UnaryOperatorKind Opc,
                                   const UnresolvedSetImpl &Fns, Expr *input,
                                   bool RequiresADL = true);

/// Binary-operator counterpart of CreateOverloadedUnaryOp.
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                 const UnresolvedSetImpl &Fns, Expr *LHS,
                                 Expr *RHS, bool RequiresADL = true);

/// Create an overloaded `operator[]` call `Base[Idx]`.
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                              SourceLocation RLoc,
                                              Expr *Base, Expr *Idx);

/// Build a call whose callee is a (possibly overloaded) member function.
ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Args,
                                     SourceLocation RParenLoc);

/// Build `Object(Args...)` -- presumably via the class's `operator()` and
/// surrogate call candidates (see AddSurrogateCandidate) -- confirm.
ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object,
                                        SourceLocation LParenLoc,
                                        MultiExprArg Args,
                                        SourceLocation RParenLoc);

/// Build `Base->...` through an overloaded `operator->`.
/// NOTE(review): the \p NoArrowOperatorFound out-param contract is inferred
/// from the name -- confirm in the definition.
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
                                    bool *NoArrowOperatorFound = nullptr);

/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE,
                         FunctionDecl *FD);

/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. 
LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. 
a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. 
LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an extenal /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. 
void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, 
ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. 
As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol 
table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. 
// The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implelementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. 
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. 
ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declaraed in interface or /// or protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// category matches with those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. 
  /// Add the given method to the given global (selector-keyed) method list.
  void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass,
                                           bool instance);

public:
  /// - Returns instance or factory methods in global method pool for
  /// given selector. It checks the desired kind first, if none is found, and
  /// parameter checkTheOther is set, it then checks the other kind. If no such
  /// method or only one method is found, function returns false; otherwise, it
  /// returns true.
  bool
  CollectMultipleMethodsInGlobalPool(Selector Sel,
                                     SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                     bool InstanceFirst, bool CheckTheOther,
                                     const ObjCObjectType *TypeBound = nullptr);

  bool
  AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                 SourceRange R, bool receiverIdOrClass,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

  void
  DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                     Selector Sel, SourceRange R,
                                     bool receiverIdOrClass);

private:
  /// - Returns a selector which best matches given argument list or
  /// nullptr if none could be found
  ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                   bool IsInstance,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods);

  /// Record the typo correction failure and return an empty correction.
  TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    if (RecordFailure)
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool. This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
  /// messages sent to "id" (where the class of the object is unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/true);
  }

  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
  /// pool.
  void AddAnyMethodToGlobalPool(Decl *D);

  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                   bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/true);
  }

  /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                  bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/false);
  }

  const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                              QualType ObjectType=QualType());

  /// LookupImplementedMethodInGlobalPool - Returns the method which has an
  /// implementation.
  ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

  /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
  /// initialization.
  void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                  SmallVectorImpl<ObjCIvarDecl*> &Ivars);

  //===--------------------------------------------------------------------===//
  // Statement Parsing Callbacks: SemaStmt.cpp.
public:
  /// Wrapper carrying an expression after full-expression processing
  /// (see MakeFullExpr, which feeds the result of ActOnFinishFullExpr here).
  class FullExprArg {
  public:
    FullExprArg() : E(nullptr) { }
    FullExprArg(Sema &actions) : E(nullptr) { }

    ExprResult release() {
      return E;
    }

    Expr *get() const { return E; }

    Expr *operator->() {
      return E;
    }

  private:
    // FIXME: No need to make the entire Sema class a friend when it's just
    // Sema::MakeFullExpr that needs access to the constructor below.
    friend class Sema;

    explicit FullExprArg(Expr *expr) : E(expr) {}

    Expr *E;
  };

  /// Wrap \p Arg as a full expression, anchored at its own location.
  FullExprArg MakeFullExpr(Expr *Arg) {
    return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
  }
  FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
    return FullExprArg(
        ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
  }
  /// Same as MakeFullExpr but treats the expression's value as discarded.
  FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
    ExprResult FE =
        ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                            /*DiscardedValue*/ true);
    return FullExprArg(FE.get());
  }

  StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
  StmtResult ActOnExprStmtError();

  StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                           bool HasLeadingEmptyMacro = false);

  void ActOnStartOfCompoundStmt(bool IsStmtExpr);
  void ActOnFinishOfCompoundStmt();
  StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                               ArrayRef<Stmt *> Elts, bool isStmtExpr);

  /// A RAII object to enter scope of a compound statement.
  class CompoundScopeRAII {
  public:
    CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
      S.ActOnStartOfCompoundStmt(IsStmtExpr);
    }

    ~CompoundScopeRAII() {
      S.ActOnFinishOfCompoundStmt();
    }

  private:
    Sema &S;
  };

  /// An RAII helper that pops a function scope on exit.
  struct FunctionScopeRAII {
    Sema &S;
    // Set to false via disable() when the scope has been handed off and must
    // not be popped by the destructor.
    bool Active;
    FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
    ~FunctionScopeRAII() {
      if (Active)
        S.PopFunctionScopeInfo();
    }
    void disable() { Active = false; }
  };

  StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
                           SourceLocation EndLoc);
  void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
  StmtResult ActOnForEachLValueExpr(Expr *E);

  ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
  StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
                           SourceLocation DotDotDotLoc, ExprResult RHS,
                           SourceLocation ColonLoc);
  void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

  StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                              SourceLocation ColonLoc,
                              Stmt *SubStmt, Scope *CurScope);
  StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                            SourceLocation ColonLoc, Stmt *SubStmt);

  StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                                 ArrayRef<const Attr*> Attrs,
                                 Stmt *SubStmt);

  class ConditionResult;
  StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                         Stmt *InitStmt,
                         ConditionResult Cond, Stmt *ThenVal,
                         SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                         Stmt *InitStmt,
                         ConditionResult Cond, Stmt *ThenVal,
                         SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                    Stmt *InitStmt,
                                    ConditionResult Cond);
  StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
                                   Stmt *Switch, Stmt *Body);
  StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
                            Stmt *Body);
  StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                         SourceLocation WhileLoc, SourceLocation CondLParen,
                         Expr *Cond, SourceLocation CondRParen);

  StmtResult ActOnForStmt(SourceLocation ForLoc,
                          SourceLocation LParenLoc,
                          Stmt *First,
                          ConditionResult Second,
                          FullExprArg Third,
                          SourceLocation RParenLoc,
                          Stmt *Body);
  ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                           Expr *collection);
  StmtResult
ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables 
= 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, 
SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. 
void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
  void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

  /// Open a delayed-diagnostic scope for a declaration being parsed; the
  /// returned state must be closed with PopParsingDeclaration.
  ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
    return DelayedDiagnostics.push(pool);
  }
  void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

  typedef ProcessingContextState ParsingClassState;
  /// Suspend delayed diagnostics while parsing a class; restore with
  /// PopParsingClass.
  ParsingClassState PushParsingClass() {
    return DelayedDiagnostics.pushUndelayed();
  }
  void PopParsingClass(ParsingClassState state) {
    DelayedDiagnostics.popUndelayed(state);
  }

  void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

  void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                  const ObjCInterfaceDecl *UnknownObjCClass,
                                  bool ObjCPropertyAccess,
                                  bool AvoidPartialAvailabilityChecks = false,
                                  ObjCInterfaceDecl *ClassReceiver = nullptr);

  bool makeUnavailableInSystemHeader(SourceLocation loc,
                                     UnavailableAttr::ImplicitReason reason);

  /// Issue any -Wunguarded-availability warnings in \c FD
  void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

  //===--------------------------------------------------------------------===//
  // Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. 
This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. 
/// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. 
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult 
BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. 
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * 
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. 
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" // Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. 
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) 
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: /// Lookup the specified comparison category types in the standard /// library, and check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// corresponding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? 
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. 
ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Determine what sort of exception specification a defaulted /// default constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy constructor of a class will have, and whether the parameter /// will be const. ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const.
ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). 
The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. 
void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. 
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);

/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);

/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the exception specification of a
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). 
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. 
class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers. /// along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. 
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. 
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, 
SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any 
cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. 
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
/// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. 
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl * startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Optional<std::pair<unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. 
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. 
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const 
ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. 
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. 
void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annoated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. 
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl 
*Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. 
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, 
const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). 
FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, ConceptDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool 
CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> 
&Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. 
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. 
/// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); // Concepts Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. 
UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression, UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. 
/// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. 
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. 
/// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. 
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
    const MultiLevelTemplateArgumentList &TemplateArgs);

/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
///   void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);

/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
    TemplateArgumentLoc OrigLoc,
    SourceLocation &Ellipsis,
    Optional<unsigned> &NumExpansions) const;

/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);

//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//

/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
                             bool AdjustExceptionSpec = false);

/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
  /// Template argument deduction was successful.
  TDK_Success = 0,

  /// The declaration was invalid; do nothing.
  TDK_Invalid,

  /// Template argument deduction exceeded the maximum template
  /// instantiation depth (which has already been diagnosed).
  TDK_InstantiationDepth,

  /// Template argument deduction did not deduce a value
  /// for every template parameter.
  TDK_Incomplete,

  /// Template argument deduction did not deduce a value for every
  /// expansion of an expanded template parameter pack.
  TDK_IncompletePack,

  /// Template argument deduction produced inconsistent
  /// deduced values for the given template parameter.
  TDK_Inconsistent,

  /// Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,

  /// Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,

  /// After substituting deduced template arguments, a dependent
  /// parameter type did not match the corresponding argument.
  TDK_DeducedMismatch,

  /// After substituting deduced template arguments, an element of
  /// a dependent parameter type did not match the corresponding element
  /// of the corresponding argument (when deducing from an initializer list).
  TDK_DeducedMismatchNested,

  /// A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,

  /// When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,

  /// When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,

  /// The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,

  /// Checking non-dependent argument conversions failed.
  TDK_NonDependentConversionFailure,

  /// Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure,

  /// CUDA Target attributes do not match.
  TDK_CUDATargetMismatch
};

/// Deduce template arguments for the given class template partial
/// specialization from the given template argument list.
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

/// Deduce template arguments for the given variable template partial
/// specialization from the given template argument list.
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

/// Substitute the explicitly-specified template arguments into the
/// function template, producing the partially-deduced argument set and the
/// substituted parameter types (and, if requested, the function type).
TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);

/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
  OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
                  unsigned ArgIdx, QualType OriginalArgType)
      : OriginalParamType(OriginalParamType),
        DecomposedParam(DecomposedParam),
        ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {}

  // The parameter type as written, before substitution of deduced arguments.
  QualType OriginalParamType;
  // Whether the argument was decomposed (e.g., from an initializer list).
  bool DecomposedParam;
  // Index of the call argument this record corresponds to.
  unsigned ArgIdx;
  // The type of the original call argument.
  QualType OriginalArgType;
};

/// Finish function template argument deduction: substitute the deduced
/// arguments into the function template and produce the resulting
/// specialization.
TemplateDeductionResult FinishTemplateArgumentDeduction(
    FunctionTemplateDecl *FunctionTemplate,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
    sema::TemplateDeductionInfo &Info,
    SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
    bool PartialOverloading = false,
    llvm::function_ref<bool()> CheckNonDependent = []{ return false; });

/// Deduce template arguments for a function template from a set of
/// call arguments.
TemplateDeductionResult DeduceTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
    FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
    bool PartialOverloading,
    llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);

/// Deduce template arguments for a function template from a target
/// function type (e.g., when taking the address of a function template).
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        TemplateArgumentListInfo *ExplicitTemplateArgs,
                        QualType ArgFunctionType,
                        FunctionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info,
                        bool IsAddressOfFunction = false);

/// Deduce template arguments for a conversion function template from
/// the type being converted to.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        QualType ToType,
                        CXXConversionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info);

/// Deduce template arguments for a function template when there is
/// nothing to deduce against, using only explicitly-specified arguments.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        TemplateArgumentListInfo *ExplicitTemplateArgs,
                        FunctionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info,
                        bool IsAddressOfFunction = false);

/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);

/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
                                        QualType Replacement);

/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);

/// Result type of DeduceAutoType.
enum DeduceAutoResult {
  DAR_Succeeded,
  DAR_Failed,
  DAR_FailedAlreadyDiagnosed
};

DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
               Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
               Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
                      bool Diagnose = true);

/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
                                    SourceLocation Loc);

QualType DeduceTemplateSpecializationFromInitializer(
    TypeSourceInfo *TInfo, const InitializedEntity &Entity,
    const InitializationKind &Kind, MultiExprArg Init);

QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
                                      QualType Type, TypeSourceInfo *TSI,
                                      SourceRange Range, bool DirectInit,
                                      Expr *Init);

TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;

bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
                                      SourceLocation ReturnLoc,
                                      Expr *&RetExpr, AutoType *AT);

/// Determine which of the two function templates is more specialized,
/// per the partial-ordering rules.
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
                                                 FunctionTemplateDecl *FT2,
                                                 SourceLocation Loc,
                                           TemplatePartialOrderingContext TPOC,
                                                 unsigned NumCallArguments1,
                                                 unsigned NumCallArguments2);

/// Select the most specialized candidate from the given set, emitting
/// the provided diagnostics on failure or ambiguity when \p Complain is set.
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
                   TemplateSpecCandidateSet &FailedCandidates,
                   SourceLocation Loc,
                   const PartialDiagnostic &NoneDiag,
                   const PartialDiagnostic &AmbigDiag,
                   const PartialDiagnostic &CandidateDiag,
                   bool Complain = true, QualType TargetType = QualType());
/// Determine which of two class template partial specializations is
/// more specialized.
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
                                  ClassTemplatePartialSpecializationDecl *PS1,
                                  ClassTemplatePartialSpecializationDecl *PS2,
                                  SourceLocation Loc);

/// Determine whether the given class template partial specialization is
/// more specialized than the primary template.
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
                                  sema::TemplateDeductionInfo &Info);

/// Determine which of two variable template partial specializations is
/// more specialized.
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
    VarTemplatePartialSpecializationDecl *PS1,
    VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);

/// Determine whether the given variable template partial specialization is
/// more specialized than the primary template.
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
                                  sema::TemplateDeductionInfo &Info);

bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
    TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);

/// Mark which template parameters are used (or deduced, when
/// \p OnlyDeduced is set) within the given template argument list.
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
                                bool OnlyDeduced,
                                unsigned Depth,
                                llvm::SmallBitVector &Used);

// Convenience overload that forwards to the static version using this
// Sema's ASTContext.
void MarkDeducedTemplateParameters(
                  const FunctionTemplateDecl *FunctionTemplate,
                  llvm::SmallBitVector &Deduced) {
  return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
                  const FunctionTemplateDecl *FunctionTemplate,
                  llvm::SmallBitVector &Deduced);

//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//

/// Collect the multi-level template argument list to use when
/// instantiating the given declaration.
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
                             const TemplateArgumentList *Innermost = nullptr,
                             bool RelativeToPrimary = false,
                             const FunctionDecl *Pattern = nullptr);

/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
  /// The kind of template instantiation we are performing
  enum SynthesisKind {
    /// We are instantiating a template declaration. The entity is
    /// the declaration we're instantiating (e.g., a CXXRecordDecl).
    TemplateInstantiation,

    /// We are instantiating a default argument for a template
    /// parameter. The Entity is the template parameter whose argument is
    /// being instantiated, the Template is the template, and the
    /// TemplateArgs/NumTemplateArguments provide the template arguments as
    /// specified.
    DefaultTemplateArgumentInstantiation,

    /// We are instantiating a default argument for a function.
    /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
    /// provides the template arguments as specified.
    DefaultFunctionArgumentInstantiation,

    /// We are substituting explicit template arguments provided for
    /// a function template. The entity is a FunctionTemplateDecl.
    ExplicitTemplateArgumentSubstitution,

    /// We are substituting template argument determined as part of
    /// template argument deduction for either a class template
    /// partial specialization or a function template. The
    /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
    /// a TemplateDecl.
    DeducedTemplateArgumentSubstitution,

    /// We are substituting prior template arguments into a new
    /// template parameter. The template parameter itself is either a
    /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
    PriorTemplateArgumentSubstitution,

    /// We are checking the validity of a default template argument that
    /// has been used when naming a template-id.
    DefaultTemplateArgumentChecking,

    /// We are computing the exception specification for a defaulted special
    /// member function.
    ExceptionSpecEvaluation,

    /// We are instantiating the exception specification for a function
    /// template which was deferred until it was needed.
    ExceptionSpecInstantiation,

    /// We are declaring an implicit special member function.
    DeclaringSpecialMember,

    /// We are defining a synthesized function (such as a defaulted special
    /// member).
    DefiningSynthesizedFunction,

    /// Added for Template instantiation observation.
    /// Memoization means we are _not_ instantiating a template because
    /// it is already instantiated (but we entered a context where we
    /// would have had to if it was not already instantiated).
    Memoization
  } Kind;

  /// Was the enclosing context a non-instantiation SFINAE context?
  bool SavedInNonInstantiationSFINAEContext;

  /// The point of instantiation or synthesis within the source code.
  SourceLocation PointOfInstantiation;

  /// The entity that is being synthesized.
  Decl *Entity;

  /// The template (or partial specialization) in which we are
  /// performing the instantiation, for substitutions of prior template
  /// arguments.
  NamedDecl *Template;

  /// The list of template arguments we are substituting, if they
  /// are not part of the entity.
  const TemplateArgument *TemplateArgs;

  // FIXME: Wrap this union around more members, or perhaps store the
  // kind-specific members in the RAII object owning the context.
  union {
    /// The number of template arguments in TemplateArgs.
    unsigned NumTemplateArgs;

    /// The special member being declared or defined.
    CXXSpecialMember SpecialMember;
  };

  ArrayRef<TemplateArgument> template_arguments() const {
    assert(Kind != DeclaringSpecialMember);
    return {TemplateArgs, NumTemplateArgs};
  }

  /// The template deduction info object associated with the
  /// substitution or checking of explicit or deduced template arguments.
  sema::TemplateDeductionInfo *DeductionInfo;

  /// The source range that covers the construct that causes
  /// the instantiation, e.g., the template-id that causes a class
  /// template instantiation.
  SourceRange InstantiationRange;

  CodeSynthesisContext()
      : Kind(TemplateInstantiation),
        SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
        Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
        DeductionInfo(nullptr) {}

  /// Determines whether this template is an actual instantiation
  /// that should be counted toward the maximum instantiation depth.
  bool isInstantiationRecord() const;
};

/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;

/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();

/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;

/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;

/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;

/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
    TemplateInstCallbacks;

/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;

/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int OldSubstitutionIndex;

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
  }
};

friend class ArgumentPackSubstitutionRAII;

/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. 
bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. 
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  return ExprEvalContexts.back().isUnevaluated();
}

/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;
  bool PrevLastDiagnosticIgnored;

public:
  // Snapshot the SFINAE-related Sema state (error count, context flags,
  // last-diagnostic-ignored bit) so the destructor can restore all of it,
  // then switch Sema into the requested SFINAE-checking mode.
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
        PrevInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        PrevLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
    // Only mark a "non-instantiation" SFINAE context when we are not
    // already inside an active SFINAE context.
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  // Restore every piece of state captured by the constructor; any SFINAE
  // errors recorded while the trap was active are discarded from the count.
  ~SFINAETrap() {
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext
      = PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        PrevLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};

/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;

  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;

  bool PrevDisableTypoCorrection;

public:
  // The trap is constructed with AccessCheckingSFINAE = true; typo
  // correction is switched off for the duration of the scope and
  // restored afterwards.
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }
  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};

/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;

/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;

/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;

typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;

/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;

// NOTE(review): cache consumed by the thread-safety analysis machinery;
// its exact contents are defined in the threadSafety library, not here.
threadSafety::BeforeSet *ThreadSafetyDeclCache;

/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;

/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

/// RAII scope that (when Enabled) takes over the global queues of pending
/// instantiations and vtable uses so they can be drained via perform(),
/// and restores the previously saved queues on destruction.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    // Stash the outer queues; new work recorded while this scope is
    // active accumulates in the (now empty) Sema-owned queues.
    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  // Define any vtables that became required, then perform the pending
  // template instantiations collected while this scope was active.
  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    assert(S.PendingInstantiations.empty() &&
           "PendingInstantiations should be empty before it is discarded.");
    S.PendingInstantiations.swap(SavedPendingInstantiations);
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};

/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

/// RAII scope that saves the local pending-instantiation queue on entry
/// and restores it on exit; perform() drains the local instantiations
/// that accumulated in between.
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    // All local instantiations must have been drained (via perform())
    // before the scope ends.
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};

/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index,
  ///
  // NOTE(review): the assert below requires indices to be set in strictly
  // increasing order; intermediate slots are filled with default
  // ExtParameterInfo values by the resize.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    // Track whether any non-default info was ever recorded, so
    // getPointerOrNull can return null for the all-default case.
    if (!HasInteresting)
      HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (!HasInteresting) return nullptr;
    Infos.resize(numParams);
    return Infos.data();
  }
};

void PerformPendingInstantiations(bool LocalOnly = false);

TypeSourceInfo *SubstType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity,
                          bool AllowDeducedTST = false);

QualType SubstType(QualType T,
                   const MultiLevelTemplateArgumentList &TemplateArgs,
                   SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstType(TypeLoc TL,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc,
                          DeclarationName Entity,
                          CXXRecordDecl *ThisContext,
                          Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
                        const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
                        FunctionProtoType::ExceptionSpecInfo &ESI,
                        SmallVectorImpl<QualType> &ExceptionStorage,
                        const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          int indexAdjustment,
                          Optional<unsigned> NumExpansions,
                          bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
                    const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
                    const MultiLevelTemplateArgumentList &TemplateArgs,
                    SmallVectorImpl<QualType> &ParamTypes,
                    SmallVectorImpl<ParmVarDecl *> *OutParams,
                    ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
/// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, 
Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl 
*BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); VarDecl *getVarTemplateSpecialization( VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs, const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const 
*ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. 
void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. 
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). 
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); 
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). 
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). 
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. 
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFenvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. 
void AddCFAuditedAttribute(Decl *D);

void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
                                   SourceLocation PragmaLoc,
                                   attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
                                   const IdentifierInfo *Namespace);

/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
                             const IdentifierInfo *Namespace);

/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);

void DiagnoseUnterminatedPragmaAttribute();

/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation;
}

/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);

/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
                    unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
                    unsigned SpellingListIndex, bool IsPackExpansion);

/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
                          unsigned SpellingListIndex);

/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
                       unsigned SpellingListIndex);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                       unsigned SpellingListIndex);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
                         Expr *MinBlocks, unsigned SpellingListIndex);

/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
                 unsigned SpellingListIndex, bool InInstantiation = false);

void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI,
                         unsigned SpellingListIndex);

enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
                      RetainOwnershipKind K, bool IsTemplateInstantiation);

/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min,
                                    Expr *Max, unsigned SpellingListIndex);

/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). 
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. 
/// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. 
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); public: /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. 
void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. 
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList); /// Called on the start of target region i.e. 
/// '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                  const DeclarationNameInfo &Id,
                                  OMPDeclareTargetDeclAttr::MapTypeTy MT,
                                  NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
                                      SourceLocation IdLoc = SourceLocation());
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
  return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;

/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);

/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
    OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
    OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. 
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. 
StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. 
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. 
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. 
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
    OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
    SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);

/// Called for clauses that carry a list of variables (\p Vars); the
/// concrete clause being built is identified by \p Kind, and the remaining
/// parameters carry the clause-specific payload (reduction/mapper id,
/// dependency kind, linear kind, map-type modifiers, ...) that only some
/// kinds use.
OMPClause *ActOnOpenMPVarListClause(
    OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
    const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
    CXXScopeSpec &ReductionOrMapperIdScopeSpec,
    DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
    OpenMPLinearClauseKind LinKind,
    ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
    ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
    bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
                          SourceLocation StartLoc, SourceLocation ColonLoc,
                          SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. 
OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. 
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. 
CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. 
This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collector argument expressions for various /// form of call prototypes. 
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on it's // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. 
IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. 
IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. 
If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). 
/// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. 
ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. 
void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType);

/// The result of semantic processing of a condition expression (as used by
/// 'if', 'while', 'for', 'do', or 'switch' — see ConditionKind below):
/// the optional condition variable, the converted condition expression,
/// and, when the condition is a non-dependent constexpr condition, its
/// pre-evaluated constant value.
class ConditionResult {
  Decl *ConditionVar;    // Condition variable, or null if none was declared.
  FullExprArg Condition; // The processed condition expression.
  bool Invalid;          // True if this result represents an error.
  bool HasKnownValue;    // True if KnownValue below is meaningful.
  bool KnownValue;       // Compile-time value of the condition, as a bool.

  friend class Sema;

  // Only Sema can construct a populated result. The constant value is
  // evaluated here iff this is a constexpr condition, a condition
  // expression exists, and it is not value-dependent.
  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  // Builds an empty result, optionally marked invalid.
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}

public:
  /// A valid but empty condition result.
  ConditionResult() : ConditionResult(false) {}
  bool isInvalid() const { return Invalid; }
  /// Returns the condition variable (may be null) together with the
  /// condition expression.
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }
  /// Returns the compile-time value of the condition, if one was
  /// pre-evaluated; None otherwise.
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};

/// Convenience factory for an invalid ConditionResult.
static ConditionResult ConditionError() { return ConditionResult(true); }

enum class ConditionKind {
  Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
  ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
  Switch       ///< An integral condition for a 'switch' statement.
};

ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr,
                               ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                       SourceLocation StmtLoc,
                                       ConditionKind CK);

DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc, ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
/// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. 
class VerifyICEDiagnoser {
public:
  /// Whether diagnostics should be suppressed.
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  /// Diagnose an expression that is not an integer constant expression.
  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
  /// Diagnose an expression that is not a strict ICE but could be folded to
  /// a constant. Not pure; a default definition is provided elsewhere.
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           bool AllowFold = true);
/// Overload that diagnoses with a fixed diagnostic ID instead of a custom
/// diagnoser.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr);

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct, Expr *BitWidth,
                          bool *ZeroWidth = nullptr);

private:
  // Nesting depth of pragmas forcing functions to be __host__ __device__;
  // maintained by Push/PopForceCUDAHostDevice below.
  unsigned ForceCUDAHostDeviceDepth = 0;

public:
  /// Increments our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. So long as this count is greater
  /// than zero, all functions encountered will be __host__ __device__.
  void PushForceCUDAHostDevice();

  /// Decrements our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. Returns false if the count is 0
  /// before decrementing, so you can emit an error.
  bool PopForceCUDAHostDevice();

  /// Diagnostics that are emitted only if we discover that the given function
  /// must be codegen'ed. Because handling these correctly adds overhead to
  /// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// A partial call graph maintained during CUDA/OpenMP device code compilation /// to support deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to DeviceKnownEmittedFns. llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> DeviceCallGraph; /// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. 
/// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class DeviceDiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); DeviceDiagBuilder(DeviceDiagBuilder &&D); DeviceDiagBuilder(const DeviceDiagBuilder &) = default; ~DeviceDiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (DeviceDiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a DeviceDiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. 
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Indicate that this function (and thus everything it transtively calls) /// will be codegen'ed, and emit any deferred diagnostics on this function and /// its (transitive) callees. void markKnownEmitted( Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee, SourceLocation OrigLoc, const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. 
/// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. 
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. 
void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. 
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. 
PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. 
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void 
CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: 
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool 
CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, 
CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. 
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. 
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. 
Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. 
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;

private:
  /// RAII object that swaps the pending parsed-class state (delayed
  /// exception-spec checks and delayed DLL-export classes) out of the Sema
  /// on construction and restores it on destruction, asserting that the
  /// intervening state was fully drained.
  class SavePendingParsedClassStateRAII {
  public:
    SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

    ~SavePendingParsedClassStateRAII() {
      // The state accumulated while this object was live must have been
      // processed before we restore the saved state.
      assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      assert(S.DelayedDllExportClasses.empty() &&
             "there shouldn't be any pending delayed DLL export classes");
      swapSavedState();
    }

  private:
    Sema &S;
    // Saved copies of the corresponding Sema members; decltype keeps the
    // types in sync with the fields they shadow.
    decltype(DelayedOverridingExceptionSpecChecks)
        SavedOverridingExceptionSpecChecks;
    decltype(DelayedEquivalentExceptionSpecChecks)
        SavedEquivalentExceptionSpecChecks;
    decltype(DelayedDllExportClasses) SavedDllExportClasses;

    // Exchanges the saved state with the live Sema state; called once on
    // construction and once on destruction.
    void swapSavedState() {
      SavedOverridingExceptionSpecChecks.swap(
          S.DelayedOverridingExceptionSpecChecks);
      SavedEquivalentExceptionSpecChecks.swap(
          S.DelayedEquivalentExceptionSpecChecks);
      SavedDllExportClasses.swap(S.DelayedDllExportClasses);
    }
  };

  /// Helper class that collects misaligned member designations and
  /// their location info for delayed diagnostics.
  struct MisalignedMember {
    Expr *E;             // The expression designating the member access.
    RecordDecl *RD;      // The record containing the member.
    ValueDecl *MD;       // The accessed member itself.
    CharUnits Alignment; // The (reduced) alignment of the member.

    MisalignedMember() : E(), RD(), MD(), Alignment() {}
    MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                     CharUnits Alignment)
        : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
    explicit MisalignedMember(Expr *E)
        : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

    // Identity is defined solely by the expression; the other fields are
    // deliberately ignored.
    bool operator==(const MisalignedMember &m) { return this->E == m.E; }
  };

  /// Small set of gathered accesses to potentially misaligned members
  /// due to the packed attribute.
  SmallVector<MisalignedMember, 4> MisalignedMembers;

  /// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; }; /// RAII object that enters a new expression evaluation context. 
class EnterExpressionEvaluationContext {
  Sema &Actions;
  // Tracks whether a context was actually pushed, so the destructor only
  // pops what the constructor pushed.
  bool Entered = true;

public:
  /// Push NewContext unless ShouldEnter is false.
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }

  /// Push NewContext, reusing the enclosing lambda context declaration
  /// (tag-dispatched via Sema::ReuseLambdaContextDecl_t).
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  /// Tag-dispatched constructor for braced-init-lists: only pushes an
  /// UnevaluatedList context when currently unevaluated in C++11 or later.
  enum InitListTag { InitList };
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    // Pop only if a matching push happened in the constructor.
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo =
      DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  // Empty/tombstone keys combine the FunctionDecl sentinel with a default
  // (invalid) SourceLocation.
  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};
} // namespace llvm

#endif
parallel_for_loop_test.c
#include <stdio.h> #include <omp.h> int main(){ #pragma omp parallel num_threads(4) { #pragma omp for for (int i = 0; i < 12;i++){ int ID = omp_get_thread_num(); printf("CPU<%d>: %d\n",ID, i); } } printf("-------------------\n"); omp_set_num_threads(4); #pragma omp parallel for for (int i = 0; i < 12;i++){ int ID = omp_get_thread_num(); printf("CPU<%d>: %d\n",ID, i); } printf("\n"); return 0; }
convolution_3x3_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available. // // Copyright (C) 2019 BUG1989. All rights reserved. // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void conv3x3s1_winograd23_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat> &kernel_tm2, int inch, int outch) { Mat kernel_tm(4*4, inch, outch, 2ul); // G const short ktm[4][3] = { { 2, 0, 0}, { 1, 1, 1}, { 1, -1, 1}, { 0, 0, 2} }; #pragma omp parallel for for (int p = 0; p<outch; p++) { for (int q = 0; q<inch; q++) { const signed char* kernel0 = (const signed char*)kernel + p*inch * 9 + q * 9; short* kernel_tm0 = kernel_tm.channel(p).row<short>(q); // transform kernel const signed char* k0 = kernel0; const signed char* k1 = kernel0 + 3; const signed char* k2 = kernel0 + 6; // h short tmp[4][3]; for (int i=0; i<4; i++) { tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j=0; j<4; j++) { short* tmpp = &tmp[j][0]; for (int i=0; i<4; i++) { kernel_tm0[j*4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } for (int r=0; r<4; r++) { Mat kernel_tm_test(4*8, inch, outch/8 + (outch%8)/4 + outch%4, 2u); int p = 0; for (; 
p+7<outch; p+=8) { const short* kernel0 = (const short*)kernel_tm + (p+0)*inch*16; const short* kernel1 = (const short*)kernel_tm + (p+1)*inch*16; const short* kernel2 = (const short*)kernel_tm + (p+2)*inch*16; const short* kernel3 = (const short*)kernel_tm + (p+3)*inch*16; const short* kernel4 = (const short*)kernel_tm + (p+4)*inch*16; const short* kernel5 = (const short*)kernel_tm + (p+5)*inch*16; const short* kernel6 = (const short*)kernel_tm + (p+6)*inch*16; const short* kernel7 = (const short*)kernel_tm + (p+7)*inch*16; short* ktmp = kernel_tm_test.channel(p/8); for (int q=0; q<inch; q++) { ktmp[0] = kernel0[r*4+0]; ktmp[1] = kernel0[r*4+1]; ktmp[2] = kernel0[r*4+2]; ktmp[3] = kernel0[r*4+3]; ktmp[4] = kernel1[r*4+0]; ktmp[5] = kernel1[r*4+1]; ktmp[6] = kernel1[r*4+2]; ktmp[7] = kernel1[r*4+3]; ktmp[8] = kernel2[r*4+0]; ktmp[9] = kernel2[r*4+1]; ktmp[10] = kernel2[r*4+2]; ktmp[11] = kernel2[r*4+3]; ktmp[12] = kernel3[r*4+0]; ktmp[13] = kernel3[r*4+1]; ktmp[14] = kernel3[r*4+2]; ktmp[15] = kernel3[r*4+3]; ktmp[16] = kernel4[r*4+0]; ktmp[17] = kernel4[r*4+1]; ktmp[18] = kernel4[r*4+2]; ktmp[19] = kernel4[r*4+3]; ktmp[20] = kernel5[r*4+0]; ktmp[21] = kernel5[r*4+1]; ktmp[22] = kernel5[r*4+2]; ktmp[23] = kernel5[r*4+3]; ktmp[24] = kernel6[r*4+0]; ktmp[25] = kernel6[r*4+1]; ktmp[26] = kernel6[r*4+2]; ktmp[27] = kernel6[r*4+3]; ktmp[28] = kernel7[r*4+0]; ktmp[29] = kernel7[r*4+1]; ktmp[30] = kernel7[r*4+2]; ktmp[31] = kernel7[r*4+3]; ktmp += 32; kernel0 += 16; kernel1 += 16; kernel2 += 16; kernel3 += 16; kernel4 += 16; kernel5 += 16; kernel6 += 16; kernel7 += 16; } } for (; p+3<outch; p+=4) { const short* kernel0 = (const short*)kernel_tm + (p+0)*inch*16; const short* kernel1 = (const short*)kernel_tm + (p+1)*inch*16; const short* kernel2 = (const short*)kernel_tm + (p+2)*inch*16; const short* kernel3 = (const short*)kernel_tm + (p+3)*inch*16; short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4); for (int q=0; q<inch; q++) { ktmp[0] = kernel0[r*4+0]; ktmp[1] = 
kernel0[r*4+1]; ktmp[2] = kernel0[r*4+2]; ktmp[3] = kernel0[r*4+3]; ktmp[4] = kernel1[r*4+0]; ktmp[5] = kernel1[r*4+1]; ktmp[6] = kernel1[r*4+2]; ktmp[7] = kernel1[r*4+3]; ktmp[8] = kernel2[r*4+0]; ktmp[9] = kernel2[r*4+1]; ktmp[10] = kernel2[r*4+2]; ktmp[11] = kernel2[r*4+3]; ktmp[12] = kernel3[r*4+0]; ktmp[13] = kernel3[r*4+1]; ktmp[14] = kernel3[r*4+2]; ktmp[15] = kernel3[r*4+3]; ktmp += 16; kernel0 += 16; kernel1 += 16; kernel2 += 16; kernel3 += 16; } } for (; p<outch; p++) { const short* kernel0 = (const short*)kernel_tm + p*inch*16; short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4 + p%4); for (int q=0; q<inch; q++) { ktmp[0] = kernel0[r*4+0]; ktmp[1] = kernel0[r*4+1]; ktmp[2] = kernel0[r*4+2]; ktmp[3] = kernel0[r*4+3]; ktmp += 4; kernel0 += 16; } } kernel_tm2.push_back(kernel_tm_test); } } static void conv3x3s1_winograd23_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 2n+2, winograd F(2,3) Mat bottom_blob_bordered = bottom_blob; outw = (outw + 1) / 2 * 2; outh = (outh + 1) / 2 * 2; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm/4; // may be the block num in FeatherCNN int nRowBlocks = w_tm/4; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4, inch, tiles*4, 2u, opt.workspace_allocator); // BT // const float itm[4][4] = { // {1.0f, 0.0f, -1.0f, 0.0f}, // {0.0f, 1.0f, 1.00f, 0.0f}, // {0.0f, -1.0f, 1.00f, 0.0f}, // {0.0f, -1.0f, 0.00f, 1.0f} // }; #pragma omp parallel for num_threads(opt.num_threads) for (int q=0; q<inch; q++) { const signed char* img = bottom_blob_bordered.channel(q); for 
(int j=0; j<nColBlocks; j++) { const signed char* r0 = img + w * j * 2; const signed char* r1 = r0 + w; const signed char* r2 = r1 + w; const signed char* r3 = r2 + w; for (int i = 0; i<nRowBlocks; i++) { short* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row<short>(q); short* out_tm1 = bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row<short>(q); short* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row<short>(q); short* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row<short>(q); #if __ARM_NEON #if __aarch64__ asm volatile( // load "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.8b}, [%0] \n" "prfm pldl1keep, [%1, #64] \n" "ld1 {v1.8b}, [%1] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v2.8b}, [%2] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v3.8b}, [%3] \n" // w = B_t * d, trans int8 to int16 "ssubl v4.8h, v0.8b, v2.8b \n" // d4 "saddl v5.8h, v1.8b, v2.8b \n" // d6 "ssubl v6.8h, v2.8b, v1.8b \n" // d8 "ssubl v7.8h, v3.8b, v1.8b \n" // d10 // transpose w to w_t "trn1 v8.4h, v4.4h, v5.4h \n" "trn2 v9.4h, v4.4h, v5.4h \n" "trn1 v10.4h, v6.4h, v7.4h \n" "trn2 v11.4h, v6.4h, v7.4h \n" "trn1 v0.2s, v8.2s, v10.2s \n" "trn2 v2.2s, v8.2s, v10.2s \n" "trn1 v1.2s, v9.2s, v11.2s \n" "trn2 v3.2s, v9.2s, v11.2s \n" // U = B_t * d_t "sub v4.4h, v0.4h, v2.4h \n" "add v5.4h, v1.4h, v2.4h \n" "sub v6.4h, v2.4h, v1.4h \n" "sub v7.4h, v3.4h, v1.4h \n" // save "st1 {v4.4h}, [%4] \n" "st1 {v5.4h}, [%5] \n" "st1 {v6.4h}, [%6] \n" "st1 {v7.4h}, [%7] \n" : "=r"(r0), // %0 "=r"(r1), // %1 "=r"(r2), // %2 "=r"(r3), // %3 "=r"(out_tm0), // %4 "=r"(out_tm1), // %5 "=r"(out_tm2), // %6 "=r"(out_tm3) // %7 : "0"(r0), "1"(r1), "2"(r2), "3"(r3), "4"(out_tm0), "5"(out_tm1), "6"(out_tm2), "7"(out_tm3) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11" ); #else asm volatile( // load "pld [%0, #64] \n" "vld1.s8 {d0}, [%0] \n" "pld [%1, #64] \n" "vld1.s8 {d1}, [%1] \n" "pld [%2, #64] \n" "vld1.s8 {d2}, [%2] \n" "pld [%3, #64] \n" "vld1.s8 
{d3}, [%3] \n" // w = B_t * d, trans int8 to int16 "vsubl.s8 q2, d0, d2 \n" // d4 "vaddl.s8 q3, d1, d2 \n" // d6 "vsubl.s8 q4, d2, d1 \n" // d8 "vsubl.s8 q5, d3, d1 \n" // d10 // transpose w to w_t "vtrn.s16 d4, d6 \n" "vtrn.s16 d8, d10 \n" "vtrn.s32 d4, d8 \n" "vtrn.s32 d6, d10 \n" // U = B_t * d_t "vsub.s16 d11, d4, d8 \n" "vadd.s16 d12, d6, d8 \n" "vsub.s16 d13, d8, d6 \n" "vsub.s16 d14, d10, d6 \n" // save "vst1.s32 {d11}, [%4] \n" "vst1.s32 {d12}, [%5] \n" "vst1.s32 {d13}, [%6] \n" "vst1.s32 {d14}, [%7] \n" : "=r"(r0), // %0 "=r"(r1), // %1 "=r"(r2), // %2 "=r"(r3), // %3 "=r"(out_tm0), // %4 "=r"(out_tm1), // %5 "=r"(out_tm2), // %6 "=r"(out_tm3) // %7 : "0"(r0), "1"(r1), "2"(r2), "3"(r3), "4"(out_tm0), "5"(out_tm1), "6"(out_tm2), "7"(out_tm3) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7" ); #endif // __aarch64__ #else short d0[4],d1[4],d2[4],d3[4]; short w0[4],w1[4],w2[4],w3[4]; short t0[4],t1[4],t2[4],t3[4]; // load for (int n = 0; n < 4; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; } // w = B_t * d for (int n = 0; n < 4; n++) { w0[n] = d0[n] - d2[n]; w1[n] = d1[n] + d2[n]; w2[n] = d2[n] - d1[n]; w3[n] = d3[n] - d1[n]; } // transpose d to d_t { t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; } // U = B_t * d_t for (int n = 0; n < 4; n++) { d0[n] = t0[n] - t2[n]; d1[n] = t1[n] + t2[n]; d2[n] = t2[n] - t1[n]; d3[n] = t3[n] - t1[n]; } // save to out_tm for (int n = 0; n < 4; n++) { out_tm0[n] = d0[n]; out_tm1[n] = d1[n]; out_tm2[n] = d2[n]; out_tm3[n] = d3[n]; } #endif r0 += 2; r1 += 2; r2 += 2; r3 += 2; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm/4; // may be the block num in FeatherCNN int nRowBlocks = w_tm/4; const int tiles = nColBlocks * nRowBlocks; 
top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r=0; r<4; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); int* output4_tm = top_blob_tm.channel(p+4); int* output5_tm = top_blob_tm.channel(p+5); int* output6_tm = top_blob_tm.channel(p+6); int* output7_tm = top_blob_tm.channel(p+7); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; output4_tm = output4_tm + r*4; output5_tm = output5_tm + r*4; output6_tm = output6_tm + r*4; output7_tm = output7_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "mov w4, %w20 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%8] \n" "ld1 {v9.4h, v10.4h}, [%9] \n" // _k0 = vld1q_s16(kptr); "add %9, %9, #16 \n" "ld1 {v11.4h, v12.4h}, [%9] \n" // _k0n = vld1q_s16(kptr+8); "add %9, %9, #16 \n" "ld1 {v13.4h, v14.4h}, [%9] \n" // _k1 = vld1q_s16(kptr+16); "add %9, %9, #16 \n" "ld1 {v15.4h, v16.4h}, [%9] \n" // _k1n = vld1q_s16(kptr+24); "add %8, %8, #8 \n" "add %9, %9, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) 
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63) "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // "st1 {v4.4s}, [%4] \n" // "st1 {v5.4s}, [%5] \n" // "st1 {v6.4s}, [%6] \n" // "st1 {v7.4s}, [%7] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "vmov.s32 q4, #0 \n" "vmov.s32 q5, #0 \n" "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "mov r4, %20 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%8]! 
\n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%9] \n" // _k0 = vld1q_s16(kptr); "add %9, #16 \n" "vld1.s16 {d20-d21}, [%9] \n" // _k0n = vld1q_s16(kptr+8); "add %9, #16 \n" "vld1.s16 {d22-d23}, [%9] \n" // _k1 = vld1q_s16(kptr+16); "add %9, #16 \n" "vld1.s16 {d24-d25}, [%9] \n" // _k1n = vld1q_s16(kptr+24); "add %9, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43) "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53) "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63) "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" "vst1.s32 {d8-d9}, [%4] \n" "vst1.s32 {d10-d11}, [%5] \n" "vst1.s32 {d12-d13}, [%6] \n" "vst1.s32 {d14-d15}, [%7] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int sum4[4] = {0}; int sum5[4] = {0}; int sum6[4] = {0}; int sum7[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += 
(int)r0[n] * kptr[n+12]; sum4[n] += (int)r0[n] * kptr[n+16]; sum5[n] += (int)r0[n] * kptr[n+20]; sum6[n] += (int)r0[n] * kptr[n+24]; sum7[n] += (int)r0[n] * kptr[n+28]; } kptr += 32; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __ARM_NEON output0_tm += 16; output1_tm += 16; output2_tm += 16; output3_tm += 16; output4_tm += 16; output5_tm += 16; output6_tm += 16; output7_tm += 16; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "mov w4, %w12 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%4] \n" "ld1 {v9.4h, v10.4h}, [%5] \n" // _k0 = vld1q_s16(kptr); "add %5, %5, #16 \n" "ld1 {v11.4h, v12.4h}, [%5] \n" // _k0n = vld1q_s16(kptr+8); "add %4, %4, #8 \n" "add %5, %5, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // 
store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "mov r4, %12 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%5] \n" // _k0 = vld1q_s16(kptr); "add %5, #16 \n" "vld1.s16 {d20-d21}, [%5] \n" // _k0n = vld1q_s16(kptr+8); "add %5, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += (int)r0[n] * kptr[n+12]; } kptr += 16; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; 
output3_tm[n] = sum3[n]; } #endif // __ARM_NEON output0_tm += 16; output1_tm += 16; output2_tm += 16; output3_tm += 16; } } remain_outch_start += nn_outch << 2; for (int p=remain_outch_start; p<outch; p++) { int* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "mov w4, %w6 \n" "0: \n" // for (int q=0; q<inch; q++) //"prfm pldl1keep, [%2, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%1] \n" "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %1, %1, #8 \n" "add %2, %2, #8 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "mov r4, %6 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "add %1, #8 \n" "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %2, #8 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q8", "q9" ); #endif // __aarch64__ #else int sum0[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; } #endif output0_tm += 16; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform 
output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // AT // const float itm[2][4] = { // {1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 1.0f} // }; int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm/4; // may be the block num in FeatherCNN int nRowBlocks = w_tm/4; #if __ARM_NEON int32x2_t _shift = vdup_n_s32(-2); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { int* out_tile = top_blob_tm.channel(p); int* outRow0 = top_blob_bordered.channel(p); int* outRow1 = outRow0 + outw; for (int j=0; j<nColBlocks; j++) { for(int i=0; i<nRowBlocks; i++) { #if __ARM_NEON #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "add v0.4s, v0.4s, v1.4s \n" // s0 = s0 + s1 + s2; "sub v1.4s, v1.4s, v2.4s \n" "add v0.4s, v0.4s, v2.4s \n" // s1 = s1 - s2 + s3; "add v1.4s, v1.4s, v3.4s \n" "trn1 v4.4s, v0.4s, v1.4s \n" "trn2 v5.4s, v0.4s, v1.4s \n" "dup v6.2d, v4.d[1] \n" "dup v7.2d, v5.d[1] \n" "add v0.2s, v4.2s, v5.2s \n" // o0 = d0 + d1 + d2; "sub v1.2s, v5.2s, v6.2s \n" "add v0.2s, v0.2s, v6.2s \n" // o1 = d1 - d2 + d3; "add v1.2s, v1.2s, v7.2s \n" "sshl v0.2s, v0.2s, %6.2s \n" // o0 = o0 >> 2 "sshl v1.2s, v1.2s, %6.2s \n" // o1 = o1 >> 2 "st1 {v0.2s}, [%1], #8 \n" "st1 {v1.2s}, [%2], #8 \n" : "=r"(out_tile), // %0 "=r"(outRow0), // %1 "=r"(outRow1) // %2 : "0"(out_tile), "1"(outRow0), "2"(outRow1), "w"(_shift) // %6 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" ); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "vaddq.s32 q0, q0, q1 \n" // s0 = s0 + s1 + s2; "vsubq.s32 q1, q1, q2 \n" "vaddq.s32 q0, q0, q2 \n" // s1 = s1 - s2 + s3; "vaddq.s32 q1, q1, q3 \n" "vtrn.s32 q0, q1 \n" "vadd.s32 d8, d0, d2 \n" // o0 = d0 + d1 + d2; "vsub.s32 d9, d2, d1 \n" "vadd.s32 d8, d8, d1 \n" // o1 = d1 - d2 + d3; "vadd.s32 d9, d9, d3 \n" "vshl.s32 d8, d8, %P6 \n" // o0 = o0 >> 2 "vshl.s32 d9, 
d9, %P6 \n" // o1 = o1 >> 2 "vst1.s32 {d8}, [%1]! \n" "vst1.s32 {d9}, [%2]! \n" : "=r"(out_tile), // %0 "=r"(outRow0), // %1 "=r"(outRow1) // %2 : "0"(out_tile), "1"(outRow0), "2"(outRow1), "w"(_shift) // %6 : "cc", "memory", "q0", "q1", "q2", "q3", "q4" ); #endif // __aarch64__ #else int s0[4],s1[4],s2[4],s3[4]; int w0[4],w1[4]; int d0[2],d1[2],d2[2],d3[2]; int o0[2],o1[2]; // load for (int n = 0; n < 4; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n+ 4]; s2[n] = out_tile[n+ 8]; s3[n] = out_tile[n+12]; } // w = A_T * W for (int n = 0; n < 4; n++) { w0[n] = s0[n] + s1[n] + s2[n]; w1[n] = s1[n] - s2[n] + s3[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d1[0] = w0[1]; d1[1] = w1[1]; d2[0] = w0[2]; d2[1] = w1[2]; d3[0] = w0[3]; d3[1] = w1[3]; } // Y = A_T * w_t for (int n = 0; n < 2; n++) { o0[n] = d0[n] + d1[n] + d2[n]; o1[n] = d1[n] - d2[n] + d3[n]; } // save to top blob tm,why right 2,because the G' = G*2 outRow0[0] = o0[0] >> 2; outRow0[1] = o0[1] >> 2; outRow1[0] = o1[0] >> 2; outRow1[1] = o1[1] >> 2; out_tile += 16; outRow0 += 2; outRow1 += 2; #endif // __ARM_NEON } outRow0 += outw; outRow1 += outw; } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads); } static void conv3x3s1_winograd43_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat> &kernel_tm2, int inch, int outch) { Mat kernel_tm(6*6, inch, outch, 2ul); // G // const float ktm[6][3] = { // { 1.0f/4, 0.0f, 0.0f}, // { -1.0f/6, -1.0f/6, -1.0f/6}, // { -1.0f/6, 1.0f/6, -1.0f/6}, // { 1.0f/24, 1.0f/12, 1.0f/6}, // { 1.0f/24, -1.0f/12, 1.0f/6}, // { 0.0f, 0.0f, 1.0f} // }; const short ktm[6][3] = { { 6, 0, 0}, { -4, -4, -4}, { -4, 4, -4}, { 1, 2, 4}, { 1, -2, 4}, { 0, 0, 24} }; #pragma omp parallel for for (int p = 0; p<outch; p++) { for (int q = 0; q<inch; q++) { const signed char* kernel0 = (const signed char*)kernel + p*inch * 9 + q * 
9; short* kernel_tm0 = kernel_tm.channel(p).row<short>(q); // transform kernel const signed char* k0 = kernel0; const signed char* k1 = kernel0 + 3; const signed char* k2 = kernel0 + 6; // h short tmp[6][3]; for (int i=0; i<6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j=0; j<6; j++) { short* tmpp = &tmp[j][0]; for (int i=0; i<6; i++) { kernel_tm0[j*6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } for (int r=0; r<9; r++) { Mat kernel_tm_test(4*8, inch, outch/8 + (outch%8)/4 + outch%4, 2u); int p = 0; for (; p+7<outch; p+=8) { const short* kernel0 = (const short*)kernel_tm.channel(p); const short* kernel1 = (const short*)kernel_tm.channel(p+1); const short* kernel2 = (const short*)kernel_tm.channel(p+2); const short* kernel3 = (const short*)kernel_tm.channel(p+3); const short* kernel4 = (const short*)kernel_tm.channel(p+4); const short* kernel5 = (const short*)kernel_tm.channel(p+5); const short* kernel6 = (const short*)kernel_tm.channel(p+6); const short* kernel7 = (const short*)kernel_tm.channel(p+7); short* ktmp = kernel_tm_test.channel(p/8); for (int q=0; q<inch; q++) { ktmp[0] = kernel0[r*4+0]; ktmp[1] = kernel0[r*4+1]; ktmp[2] = kernel0[r*4+2]; ktmp[3] = kernel0[r*4+3]; ktmp[4] = kernel1[r*4+0]; ktmp[5] = kernel1[r*4+1]; ktmp[6] = kernel1[r*4+2]; ktmp[7] = kernel1[r*4+3]; ktmp[8] = kernel2[r*4+0]; ktmp[9] = kernel2[r*4+1]; ktmp[10] = kernel2[r*4+2]; ktmp[11] = kernel2[r*4+3]; ktmp[12] = kernel3[r*4+0]; ktmp[13] = kernel3[r*4+1]; ktmp[14] = kernel3[r*4+2]; ktmp[15] = kernel3[r*4+3]; ktmp[16] = kernel4[r*4+0]; ktmp[17] = kernel4[r*4+1]; ktmp[18] = kernel4[r*4+2]; ktmp[19] = kernel4[r*4+3]; ktmp[20] = kernel5[r*4+0]; ktmp[21] = kernel5[r*4+1]; ktmp[22] = kernel5[r*4+2]; ktmp[23] = kernel5[r*4+3]; ktmp[24] = kernel6[r*4+0]; ktmp[25] = 
kernel6[r*4+1];
ktmp[26] = kernel6[r*4+2];
ktmp[27] = kernel6[r*4+3];
ktmp[28] = kernel7[r*4+0];
ktmp[29] = kernel7[r*4+1];
ktmp[30] = kernel7[r*4+2];
ktmp[31] = kernel7[r*4+3];

ktmp += 32;
kernel0 += 36;
kernel1 += 36;
kernel2 += 36;
kernel3 += 36;
kernel4 += 36;
kernel5 += 36;
kernel6 += 36;
kernel7 += 36;
}
}

// Pack remaining output channels four at a time: 4 channels x 4 shorts
// interleaved per input channel (16 shorts per q).
for (; p+3<outch; p+=4)
{
    const short* kernel0 = (const short*)kernel_tm.channel(p);
    const short* kernel1 = (const short*)kernel_tm.channel(p+1);
    const short* kernel2 = (const short*)kernel_tm.channel(p+2);
    const short* kernel3 = (const short*)kernel_tm.channel(p+3);

    short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4);

    for (int q=0; q<inch; q++)
    {
        ktmp[0] = kernel0[r*4+0];
        ktmp[1] = kernel0[r*4+1];
        ktmp[2] = kernel0[r*4+2];
        ktmp[3] = kernel0[r*4+3];
        ktmp[4] = kernel1[r*4+0];
        ktmp[5] = kernel1[r*4+1];
        ktmp[6] = kernel1[r*4+2];
        ktmp[7] = kernel1[r*4+3];
        ktmp[8] = kernel2[r*4+0];
        ktmp[9] = kernel2[r*4+1];
        ktmp[10] = kernel2[r*4+2];
        ktmp[11] = kernel2[r*4+3];
        ktmp[12] = kernel3[r*4+0];
        ktmp[13] = kernel3[r*4+1];
        ktmp[14] = kernel3[r*4+2];
        ktmp[15] = kernel3[r*4+3];

        ktmp += 16;
        kernel0 += 36;
        kernel1 += 36;
        kernel2 += 36;
        kernel3 += 36;
    }
}

// Pack leftover single output channels: 4 shorts per input channel.
for (; p<outch; p++)
{
    const short* kernel0 = (const short*)kernel_tm.channel(p);

    short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4 + p%4);

    for (int q=0; q<inch; q++)
    {
        ktmp[0] = kernel0[r*4+0];
        ktmp[1] = kernel0[r*4+1];
        ktmp[2] = kernel0[r*4+2];
        ktmp[3] = kernel0[r*4+3];

        ktmp += 4;
        kernel0 += 36;
    }
}

kernel_tm2.push_back(kernel_tm_test);
}
}

// Winograd F(4,3) int8 3x3 stride-1 convolution (NEON accelerated).
//
// Pipeline: pad input to a 4n+2 grid -> transform 6x6 input tiles with B^T
// (int8 -> int16) -> per-tile multiply-accumulate against the pre-transformed
// kernels in kernel_tm_test (int16 x int16 -> int32 dot) -> inverse-transform
// 6x6 result tiles with A^T (int32) -> crop padding back off into top_blob.
//
// Parameters:
//   bottom_blob    - int8 input feature map (elemsize 1u, signed char data).
//   top_blob       - int32 output; must be pre-allocated (w/h/c are read from
//                    it to derive the padded working size).
//   kernel_tm_test - per-r (r = 0..8) repacked transformed kernels produced by
//                    the packing code above; layout matches the kptr indexing
//                    in the dot loops below.
//   opt            - allocators and thread count.
//
// NOTE(review): the output values are divided by 576 before storing —
// presumably the combined integer scaling of the B^T/A^T transforms for
// F(4,3); confirm against the transform derivation before changing it.
static void conv3x3s1_winograd43_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2, winograd F(4,3)
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm/6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/6;

        const int tiles = nColBlocks * nRowBlocks;

        // Transformed input layout: 9 groups of 'tiles' channels; each channel
        // holds 4 shorts per input channel (one quarter of a 6x6 tile per r).
        bottom_blob_tm.create(4, inch, tiles*9, 2u, opt.workspace_allocator);

        // BT
        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 = 4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r03 + r04
        // 2 = 4 * (r01 - r02) - r03 + r04
        // 3 = -2 * r01 - r02 + 2 * r03 + r04
        // 4 = 2 * r01 - r02 - 2 * r03 + r04
        // 5 = 4 * r01 - 5 * r03 + r05

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q=0; q<inch; q++)
        {
            const signed char* img = bottom_blob_bordered.channel(q);

            for (int j = 0; j < nColBlocks; j++)
            {
                // Six consecutive input rows feeding one row of 6x6 tiles.
                const signed char* r0 = img + w * j * 4;
                const signed char* r1 = r0 + w;
                const signed char* r2 = r1 + w;
                const signed char* r3 = r2 + w;
                const signed char* r4 = r3 + w;
                const signed char* r5 = r4 + w;

                for (int i = 0; i < nRowBlocks; i++)
                {
                    // One destination row per r-group for this tile.
                    short* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row<short>(q);
                    short* out_tm1 = bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row<short>(q);
                    short* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row<short>(q);
                    short* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row<short>(q);
                    short* out_tm4 = bottom_blob_tm.channel(tiles*4+j*nRowBlocks+i).row<short>(q);
                    short* out_tm5 = bottom_blob_tm.channel(tiles*5+j*nRowBlocks+i).row<short>(q);
                    short* out_tm6 = bottom_blob_tm.channel(tiles*6+j*nRowBlocks+i).row<short>(q);
                    short* out_tm7 = bottom_blob_tm.channel(tiles*7+j*nRowBlocks+i).row<short>(q);
                    short* out_tm8 = bottom_blob_tm.channel(tiles*8+j*nRowBlocks+i).row<short>(q);
#if __ARM_NEON
                    int8x8_t _d0, _d1, _d2, _d3, _d4, _d5;
                    int16x8_t _w0, _w1, _w2, _w3, _w4, _w5;
                    int16x8_t _t0, _t1, _t2, _t3, _t4, _t5;
                    int16x8_t _n0, _n1, _n2, _n3, _n4, _n5;

                    // load
                    // NOTE(review): vld1_s8 reads 8 bytes but only 6 are used;
                    // relies on the padded border making the over-read safe.
                    _d0 = vld1_s8(r0);
                    _d1 = vld1_s8(r1);
                    _d2 = vld1_s8(r2);
                    _d3 = vld1_s8(r3);
                    _d4 = vld1_s8(r4);
                    _d5 = vld1_s8(r5);

                    int8x8_t _1_n = vdup_n_s8(-1);
                    int8x8_t _2_p = vdup_n_s8(2);
                    int8x8_t _2_n = vdup_n_s8(-2);
                    int8x8_t _4_p = vdup_n_s8(4);
                    int8x8_t _4_n = vdup_n_s8(-4);
                    int8x8_t _5_n = vdup_n_s8(-5);

                    int16x8_t _1_n_s16 = vdupq_n_s16(-1);
                    int16x8_t _2_p_s16 = vdupq_n_s16(2);
                    int16x8_t _2_n_s16 = vdupq_n_s16(-2);
                    int16x8_t _4_p_s16 = vdupq_n_s16(4);
                    int16x8_t _4_n_s16 = vdupq_n_s16(-4);
                    int16x8_t _5_n_s16 = vdupq_n_s16(-5);

                    // w = B_t * d  (widening int8 x int8 -> int16)
                    _w0 = vmull_s8(_d0, _4_p);
                    _w0 = vmlal_s8(_w0, _d2, _5_n);
                    _w0 = vaddw_s8(_w0, _d4);

                    _w1 = vmull_s8(_d1, _4_n);
                    _w1 = vmlal_s8(_w1, _d2, _4_n);
                    _w1 = vaddw_s8(_w1, _d3);
                    _w1 = vaddw_s8(_w1, _d4);

                    _w2 = vmull_s8(_d1, _4_p);
                    _w2 = vmlal_s8(_w2, _d2, _4_n);
                    _w2 = vmlal_s8(_w2, _d3, _1_n);
                    _w2 = vaddw_s8(_w2, _d4);

                    _w3 = vmull_s8(_d1, _2_n);
                    _w3 = vmlal_s8(_w3, _d2, _1_n);
                    _w3 = vmlal_s8(_w3, _d3, _2_p);
                    _w3 = vaddw_s8(_w3, _d4);

                    _w4 = vmull_s8(_d1, _2_p);
                    _w4 = vmlal_s8(_w4, _d2, _1_n);
                    _w4 = vmlal_s8(_w4, _d3, _2_n);
                    _w4 = vaddw_s8(_w4, _d4);

                    _w5 = vmull_s8(_d1, _4_p);
                    _w5 = vmlal_s8(_w5, _d3, _5_n);
                    _w5 = vaddw_s8(_w5, _d5);

                    // transpose d to d_t (lane-by-lane; only the first 6 lanes matter)
                    {
                        _t0[0]=_w0[0]; _t1[0]=_w0[1]; _t2[0]=_w0[2]; _t3[0]=_w0[3]; _t4[0]=_w0[4]; _t5[0]=_w0[5];
                        _t0[1]=_w1[0]; _t1[1]=_w1[1]; _t2[1]=_w1[2]; _t3[1]=_w1[3]; _t4[1]=_w1[4]; _t5[1]=_w1[5];
                        _t0[2]=_w2[0]; _t1[2]=_w2[1]; _t2[2]=_w2[2]; _t3[2]=_w2[3]; _t4[2]=_w2[4]; _t5[2]=_w2[5];
                        _t0[3]=_w3[0]; _t1[3]=_w3[1]; _t2[3]=_w3[2]; _t3[3]=_w3[3]; _t4[3]=_w3[4]; _t5[3]=_w3[5];
                        _t0[4]=_w4[0]; _t1[4]=_w4[1]; _t2[4]=_w4[2]; _t3[4]=_w4[3]; _t4[4]=_w4[4]; _t5[4]=_w4[5];
                        _t0[5]=_w5[0]; _t1[5]=_w5[1]; _t2[5]=_w5[2]; _t3[5]=_w5[3]; _t4[5]=_w5[4]; _t5[5]=_w5[5];
                    }

                    // d = B_t * d_t  (same transform applied to columns, in int16)
                    _n0 = vmulq_s16(_t0, _4_p_s16);
                    _n0 = vmlaq_s16(_n0, _t2, _5_n_s16);
                    _n0 = vaddq_s16(_n0, _t4);

                    _n1 = vmulq_s16(_t1, _4_n_s16);
                    _n1 = vmlaq_s16(_n1, _t2, _4_n_s16);
                    _n1 = vaddq_s16(_n1, _t3);
                    _n1 = vaddq_s16(_n1, _t4);

                    _n2 = vmulq_s16(_t1, _4_p_s16);
                    _n2 = vmlaq_s16(_n2, _t2, _4_n_s16);
                    _n2 = vmlaq_s16(_n2, _t3, _1_n_s16);
                    _n2 = vaddq_s16(_n2, _t4);

                    _n3 = vmulq_s16(_t1, _2_n_s16);
                    _n3 = vmlaq_s16(_n3, _t2, _1_n_s16);
                    _n3 = vmlaq_s16(_n3, _t3, _2_p_s16);
                    _n3 = vaddq_s16(_n3, _t4);

                    _n4 = vmulq_s16(_t1, _2_p_s16);
                    _n4 = vmlaq_s16(_n4, _t2, _1_n_s16);
                    _n4 = vmlaq_s16(_n4, _t3, _2_n_s16);
                    _n4 = vaddq_s16(_n4, _t4);

                    _n5 = vmulq_s16(_t1, _4_p_s16);
                    _n5 = vmlaq_s16(_n5, _t3, _5_n_s16);
                    _n5 = vaddq_s16(_n5, _t5);

                    // save to out_tm (36 values split 4-per-group across the 9 r-groups)
                    out_tm0[0]=_n0[0];out_tm0[1]=_n0[1];out_tm0[2]=_n0[2];out_tm0[3]=_n0[3];
                    out_tm1[0]=_n0[4];out_tm1[1]=_n0[5];out_tm1[2]=_n1[0];out_tm1[3]=_n1[1];
                    out_tm2[0]=_n1[2];out_tm2[1]=_n1[3];out_tm2[2]=_n1[4];out_tm2[3]=_n1[5];
                    out_tm3[0]=_n2[0];out_tm3[1]=_n2[1];out_tm3[2]=_n2[2];out_tm3[3]=_n2[3];
                    out_tm4[0]=_n2[4];out_tm4[1]=_n2[5];out_tm4[2]=_n3[0];out_tm4[3]=_n3[1];
                    out_tm5[0]=_n3[2];out_tm5[1]=_n3[3];out_tm5[2]=_n3[4];out_tm5[3]=_n3[5];
                    out_tm6[0]=_n4[0];out_tm6[1]=_n4[1];out_tm6[2]=_n4[2];out_tm6[3]=_n4[3];
                    out_tm7[0]=_n4[4];out_tm7[1]=_n4[5];out_tm7[2]=_n5[0];out_tm7[3]=_n5[1];
                    out_tm8[0]=_n5[2];out_tm8[1]=_n5[3];out_tm8[2]=_n5[4];out_tm8[3]=_n5[5];
#else
                    short d0[6],d1[6],d2[6],d3[6],d4[6],d5[6];
                    short w0[6],w1[6],w2[6],w3[6],w4[6],w5[6];
                    short t0[6],t1[6],t2[6],t3[6],t4[6],t5[6];

                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                        d4[n] = r4[n];
                        d5[n] = r5[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = 4*d0[n] - 5*d2[n] + d4[n];
                        w1[n] = -4*d1[n] - 4*d2[n] + d3[n] + d4[n];
                        w2[n] = 4*d1[n] - 4*d2[n] - d3[n] + d4[n];
                        w3[n] = -2*d1[n] - d2[n] + 2*d3[n] + d4[n];
                        w4[n] = 2*d1[n] - d2[n] - 2*d3[n] + d4[n];
                        w5[n] = 4*d1[n] - 5*d3[n] + d5[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t4[0]=w0[4]; t5[0]=w0[5];
                        t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t4[1]=w1[4]; t5[1]=w1[5];
                        t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t4[2]=w2[4]; t5[2]=w2[5];
                        t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; t4[3]=w3[4]; t5[3]=w3[5];
                        t0[4]=w4[0]; t1[4]=w4[1]; t2[4]=w4[2]; t3[4]=w4[3]; t4[4]=w4[4]; t5[4]=w4[5];
                        t0[5]=w5[0]; t1[5]=w5[1]; t2[5]=w5[2]; t3[5]=w5[3]; t4[5]=w5[4]; t5[5]=w5[5];
                    }
                    // d = B_t * d_t
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = 4*t0[n] - 5*t2[n] + t4[n];
                        d1[n] = - 4*t1[n] - 4*t2[n] + t3[n] + t4[n];
                        d2[n] = 4*t1[n] - 4*t2[n] - t3[n] + t4[n];
                        d3[n] = - 2*t1[n] - t2[n] + 2*t3[n] + t4[n];
                        d4[n] = 2*t1[n] - t2[n] - 2*t3[n] + t4[n];
                        d5[n] = 4*t1[n] - 5*t3[n] + t5[n];
                    }
                    // save to out_tm
                    {
                        out_tm0[0]=d0[0];out_tm0[1]=d0[1];out_tm0[2]=d0[2];out_tm0[3]=d0[3];
                        out_tm1[0]=d0[4];out_tm1[1]=d0[5];out_tm1[2]=d1[0];out_tm1[3]=d1[1];
                        out_tm2[0]=d1[2];out_tm2[1]=d1[3];out_tm2[2]=d1[4];out_tm2[3]=d1[5];
                        out_tm3[0]=d2[0];out_tm3[1]=d2[1];out_tm3[2]=d2[2];out_tm3[3]=d2[3];
                        out_tm4[0]=d2[4];out_tm4[1]=d2[5];out_tm4[2]=d3[0];out_tm4[3]=d3[1];
                        out_tm5[0]=d3[2];out_tm5[1]=d3[3];out_tm5[2]=d3[4];out_tm5[3]=d3[5];
                        out_tm6[0]=d4[0];out_tm6[1]=d4[1];out_tm6[2]=d4[2];out_tm6[3]=d4[3];
                        out_tm7[0]=d4[4];out_tm7[1]=d4[5];out_tm7[2]=d5[0];out_tm7[3]=d5[1];
                        out_tm8[0]=d5[2];out_tm8[1]=d5[3];out_tm8[2]=d5[4];out_tm8[3]=d5[5];
                    }
#endif // __ARM_NEON
                    // Tiles overlap by 2: advance 4 input columns per tile.
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm/6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/6;

        const int tiles = nColBlocks * nRowBlocks;

        top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator);

        // Parallel over the 9 r-groups; output channels are unrolled 8/4/1.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r=0; r<9; r++)
        {
            int nn_outch = 0;
            int remain_outch_start = 0;

            nn_outch = outch >> 3;
            remain_outch_start = nn_outch << 3;

            for (int pp=0; pp<nn_outch; pp++)
            {
                int p = pp * 8;

                int* output0_tm = top_blob_tm.channel(p);
                int* output1_tm = top_blob_tm.channel(p+1);
                int* output2_tm = top_blob_tm.channel(p+2);
                int* output3_tm = top_blob_tm.channel(p+3);
                int* output4_tm = top_blob_tm.channel(p+4);
                int* output5_tm = top_blob_tm.channel(p+5);
                int* output6_tm = top_blob_tm.channel(p+6);
                int* output7_tm = top_blob_tm.channel(p+7);

                output0_tm = output0_tm + r*4;
                output1_tm = output1_tm + r*4;
                output2_tm = output2_tm + r*4;
                output3_tm = output3_tm + r*4;
                output4_tm = output4_tm + r*4;
                output5_tm = output5_tm + r*4;
                output6_tm = output6_tm + r*4;
                output7_tm = output7_tm + r*4;

                for (int i=0; i<tiles; i++)
                {
                    const short* kptr = kernel_tm_test[r].channel(p/8);
                    const short* r0 = bottom_blob_tm.channel(tiles*r+i);
#if __ARM_NEON
#if __aarch64__
                    // 8-channel dot: tied operands (%8 input, %9 kernel) are
                    // advanced inside the loop; w4 counts down inch.
                    asm volatile(
                        // inch loop
                        "eor    v0.16b, v0.16b, v0.16b    \n"
                        "eor    v1.16b, v1.16b, v1.16b    \n"
                        "eor    v2.16b, v2.16b, v2.16b    \n"
                        "eor    v3.16b, v3.16b, v3.16b    \n"
                        "eor    v4.16b, v4.16b, v4.16b    \n"
                        "eor    v5.16b, v5.16b, v5.16b    \n"
                        "eor    v6.16b, v6.16b, v6.16b    \n"
                        "eor    v7.16b, v7.16b, v7.16b    \n"

                        "mov    w4, %w20                  \n"

                        "0:                               \n" // for (int q=0; q<inch; q++)
                        "prfm   pldl1keep, [%9, #128]     \n" // _r0 = vld1_s16(r0);
                        "ld1    {v8.4h}, [%8]             \n"
                        "ld1    {v9.4h, v10.4h}, [%9]     \n" // _k01 = vld1q_s16(kptr);
                        "add    %9, %9, #16               \n"
                        "ld1    {v11.4h, v12.4h}, [%9]    \n" // _k23 = vld1q_s16(kptr+8);
                        "add    %9, %9, #16               \n"
                        "ld1    {v13.4h, v14.4h}, [%9]    \n" // _k45 = vld1q_s16(kptr+16);
                        "add    %9, %9, #16               \n"
                        "ld1    {v15.4h, v16.4h}, [%9]    \n" // _k67 = vld1q_s16(kptr+24);
                        "add    %8, %8, #8                \n"
                        "add    %9, %9, #16               \n"

                        "subs   w4, w4, #1                \n"

                        "smlal  v0.4s, v8.4h, v9.4h       \n" // sum0 += (a00-a03) * (k00-k03)
                        "smlal  v1.4s, v8.4h, v10.4h      \n" // sum1 += (a00-a03) * (k10-k13)
                        "smlal  v2.4s, v8.4h, v11.4h      \n" // sum2 += (a00-a03) * (k20-k23)
                        "smlal  v3.4s, v8.4h, v12.4h      \n" // sum3 += (a00-a03) * (k30-k33)
                        "smlal  v4.4s, v8.4h, v13.4h      \n" // sum4 += (a00-a03) * (k40-k43)
                        "smlal  v5.4s, v8.4h, v14.4h      \n" // sum5 += (a00-a03) * (k50-k53)
                        "smlal  v6.4s, v8.4h, v15.4h      \n" // sum6 += (a00-a03) * (k60-k63)
                        "smlal  v7.4s, v8.4h, v16.4h      \n" // sum7 += (a00-a03) * (k70-k73)

                        "bne    0b                        \n" // end for

                        "st1    {v0.4s}, [%0]             \n" // store the result to memory
                        "st1    {v1.4s}, [%1]             \n" //
                        "st1    {v2.4s}, [%2]             \n" //
                        "st1    {v3.4s}, [%3]             \n" //
                        "st1    {v4.4s}, [%4]             \n" //
                        "st1    {v5.4s}, [%5]             \n" //
                        "st1    {v6.4s}, [%6]             \n" //
                        "st1    {v7.4s}, [%7]             \n" //

                        : "=r"(output0_tm), // %0
                          "=r"(output1_tm), // %1
                          "=r"(output2_tm), // %2
                          "=r"(output3_tm), // %3
                          "=r"(output4_tm), // %4
                          "=r"(output5_tm), // %5
                          "=r"(output6_tm), // %6
                          "=r"(output7_tm), // %7
                          "=r"(r0),         // %8
                          "=r"(kptr)        // %9
                        : "0"(output0_tm),
                          "1"(output1_tm),
                          "2"(output2_tm),
                          "3"(output3_tm),
                          "4"(output4_tm),
                          "5"(output5_tm),
                          "6"(output6_tm),
                          "7"(output7_tm),
                          "8"(r0),
                          "9"(kptr),
                          "r"(inch)         // %20
                        : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16"
                    );
#else
                    asm volatile(
                        // inch loop
                        "vmov.s32   q0, #0             \n"
                        "vmov.s32   q1, #0             \n"
                        "vmov.s32   q2, #0             \n"
                        "vmov.s32   q3, #0             \n"
                        "vmov.s32   q4, #0             \n"
                        "vmov.s32   q5, #0             \n"
                        "vmov.s32   q6, #0             \n"
                        "vmov.s32   q7, #0             \n"

                        "mov        r4, %20            \n"

                        "0:                            \n" // for (int q=0; q<inch; q++)
                        "vld1.s16   {d16}, [%8]!       \n" // _r0 = vld1_s16(r0);  // input inch0
                        "vld1.s16   {d18-d19}, [%9]    \n" // _k01 = vld1q_s16(kptr);
                        "add        %9, #16            \n"
                        "vld1.s16   {d20-d21}, [%9]    \n" // _k23 = vld1q_s16(kptr+8);
                        "add        %9, #16            \n"
                        "vld1.s16   {d22-d23}, [%9]    \n" // _k45 = vld1q_s16(kptr+16);
                        "add        %9, #16            \n"
                        "vld1.s16   {d24-d25}, [%9]    \n" // _k67 = vld1q_s16(kptr+24);
                        "add        %9, #16            \n"

                        "vmlal.s16  q0, d16, d18       \n" // sum0 += (a00-a03) * (k00-k03)
                        "vmlal.s16  q1, d16, d19       \n" // sum1 += (a00-a03) * (k10-k13)
                        "vmlal.s16  q2, d16, d20       \n" // sum2 += (a00-a03) * (k20-k23)
                        "vmlal.s16  q3, d16, d21       \n" // sum3 += (a00-a03) * (k30-k33)
                        "vmlal.s16  q4, d16, d22       \n" // sum4 += (a00-a03) * (k40-k43)
                        "vmlal.s16  q5, d16, d23       \n" // sum5 += (a00-a03) * (k50-k53)
                        "vmlal.s16  q6, d16, d24       \n" // sum6 += (a00-a03) * (k60-k63)
                        "vmlal.s16  q7, d16, d25       \n" // sum7 += (a00-a03) * (k70-k73)

                        "subs       r4, r4, #1         \n"
                        "bne        0b                 \n" // end for

                        "vst1.s32   {d0-d1}, [%0]      \n" // store the result to memory
                        "vst1.s32   {d2-d3}, [%1]      \n"
                        "vst1.s32   {d4-d5}, [%2]      \n"
                        "vst1.s32   {d6-d7}, [%3]      \n"
                        "vst1.s32   {d8-d9}, [%4]      \n"
                        "vst1.s32   {d10-d11}, [%5]    \n"
                        "vst1.s32   {d12-d13}, [%6]    \n"
                        "vst1.s32   {d14-d15}, [%7]    \n"

                        : "=r"(output0_tm), // %0
                          "=r"(output1_tm), // %1
                          "=r"(output2_tm), // %2
                          "=r"(output3_tm), // %3
                          "=r"(output4_tm), // %4
                          "=r"(output5_tm), // %5
                          "=r"(output6_tm), // %6
                          "=r"(output7_tm), // %7
                          "=r"(r0),         // %8
                          "=r"(kptr)        // %9
                        : "0"(output0_tm),
                          "1"(output1_tm),
                          "2"(output2_tm),
                          "3"(output3_tm),
                          "4"(output4_tm),
                          "5"(output5_tm),
                          "6"(output6_tm),
                          "7"(output7_tm),
                          "8"(r0),
                          "9"(kptr),
                          "r"(inch)         // %20
                        : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12"
                    );
#endif // __aarch64__
#else
                    int sum0[4] = {0};
                    int sum1[4] = {0};
                    int sum2[4] = {0};
                    int sum3[4] = {0};
                    int sum4[4] = {0};
                    int sum5[4] = {0};
                    int sum6[4] = {0};
                    int sum7[4] = {0};

                    for (int q=0; q<inch; q++)
                    {
                        for (int n=0; n<4; n++)
                        {
                            sum0[n] += (int)r0[n] * kptr[n];
                            sum1[n] += (int)r0[n] * kptr[n+4];
                            sum2[n] += (int)r0[n] * kptr[n+8];
                            sum3[n] += (int)r0[n] * kptr[n+12];
                            sum4[n] += (int)r0[n] * kptr[n+16];
                            sum5[n] += (int)r0[n] * kptr[n+20];
                            sum6[n] += (int)r0[n] * kptr[n+24];
                            sum7[n] += (int)r0[n] * kptr[n+28];
                        }
                        kptr += 32;
                        r0 += 4;
                    }

                    for (int n=0; n<4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                        output4_tm[n] = sum4[n];
                        output5_tm[n] = sum5[n];
                        output6_tm[n] = sum6[n];
                        output7_tm[n] = sum7[n];
                    }
#endif // __ARM_NEON
                    output0_tm += 36;
                    output1_tm += 36;
                    output2_tm += 36;
                    output3_tm += 36;
                    output4_tm += 36;
                    output5_tm += 36;
                    output6_tm += 36;
                    output7_tm += 36;
                }
            }

            nn_outch = (outch - remain_outch_start) >> 2;

            for (int pp=0; pp<nn_outch; pp++)
            {
                int p = remain_outch_start + pp * 4;

                int* output0_tm = top_blob_tm.channel(p);
                int* output1_tm = top_blob_tm.channel(p+1);
                int* output2_tm = top_blob_tm.channel(p+2);
                int* output3_tm = top_blob_tm.channel(p+3);

                output0_tm = output0_tm + r*4;
                output1_tm = output1_tm + r*4;
                output2_tm = output2_tm + r*4;
                output3_tm = output3_tm + r*4;

                for (int i=0; i<tiles; i++)
                {
                    const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4);
                    const short* r0 = bottom_blob_tm.channel(tiles*r+i);
#if __ARM_NEON
#if __aarch64__
                    asm volatile(
                        // inch loop
                        "eor    v0.16b, v0.16b, v0.16b    \n"
                        "eor    v1.16b, v1.16b, v1.16b    \n"
                        "eor    v2.16b, v2.16b, v2.16b    \n"
                        "eor    v3.16b, v3.16b, v3.16b    \n"

                        "mov    w4, %w12                  \n"

                        "0:                               \n" // for (int q=0; q<inch; q++)
                        "prfm   pldl1keep, [%5, #128]     \n" // _r0 = vld1_s16(r0);  // input inch0
                        "ld1    {v8.4h}, [%4]             \n"
                        "ld1    {v9.4h, v10.4h}, [%5]     \n" // _k01 = vld1q_s16(kptr);
                        "add    %5, %5, #16               \n"
                        "ld1    {v11.4h, v12.4h}, [%5]    \n" // _k23 = vld1q_s16(kptr+8);
                        "add    %4, %4, #8                \n"
                        "add    %5, %5, #16               \n"

                        "subs   w4, w4, #1                \n"

                        "smlal  v0.4s, v8.4h, v9.4h       \n" // sum0 += (a00-a03) * (k00-k03)
                        "smlal  v1.4s, v8.4h, v10.4h      \n" // sum1 += (a00-a03) * (k10-k13)
                        "smlal  v2.4s, v8.4h, v11.4h      \n" // sum2 += (a00-a03) * (k20-k23)
                        "smlal  v3.4s, v8.4h, v12.4h      \n" // sum3 += (a00-a03) * (k30-k33)

                        "bne    0b                        \n" // end for

                        "st1    {v0.4s}, [%0]             \n" // store the result to memory
                        "st1    {v1.4s}, [%1]             \n" //
                        "st1    {v2.4s}, [%2]             \n" //
                        "st1    {v3.4s}, [%3]             \n" //

                        : "=r"(output0_tm), // %0
                          "=r"(output1_tm), // %1
                          "=r"(output2_tm), // %2
                          "=r"(output3_tm), // %3
                          "=r"(r0),         // %4
                          "=r"(kptr)        // %5
                        : "0"(output0_tm),
                          "1"(output1_tm),
                          "2"(output2_tm),
                          "3"(output3_tm),
                          "4"(r0),
                          "5"(kptr),
                          "r"(inch)         // %12
                        : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12"
                    );
#else
                    asm volatile(
                        // inch loop
                        "vmov.s32   q0, #0             \n"
                        "vmov.s32   q1, #0             \n"
                        "vmov.s32   q2, #0             \n"
                        "vmov.s32   q3, #0             \n"

                        "mov        r4, %12            \n"

                        "0:                            \n" // for (int q=0; q<inch; q++)
                        "vld1.s16   {d16}, [%4]!       \n" // _r0 = vld1_s16(r0);  // input inch0
                        "vld1.s16   {d18-d19}, [%5]    \n" // _k01 = vld1q_s16(kptr);
                        "add        %5, #16            \n"
                        "vld1.s16   {d20-d21}, [%5]    \n" // _k23 = vld1q_s16(kptr+8);
                        "add        %5, #16            \n"

                        "vmlal.s16  q0, d16, d18       \n" // sum0 += (a00-a03) * (k00-k03)
                        "vmlal.s16  q1, d16, d19       \n" // sum1 += (a00-a03) * (k10-k13)
                        "vmlal.s16  q2, d16, d20       \n" // sum2 += (a00-a03) * (k20-k23)
                        "vmlal.s16  q3, d16, d21       \n" // sum3 += (a00-a03) * (k30-k33)

                        "subs       r4, r4, #1         \n"
                        "bne        0b                 \n" // end for

                        "vst1.s32   {d0-d1}, [%0]      \n" // store the result to memory
                        "vst1.s32   {d2-d3}, [%1]      \n"
                        "vst1.s32   {d4-d5}, [%2]      \n"
                        "vst1.s32   {d6-d7}, [%3]      \n"

                        : "=r"(output0_tm), // %0
                          "=r"(output1_tm), // %1
                          "=r"(output2_tm), // %2
                          "=r"(output3_tm), // %3
                          "=r"(r0),         // %4
                          "=r"(kptr)        // %5
                        : "0"(output0_tm),
                          "1"(output1_tm),
                          "2"(output2_tm),
                          "3"(output3_tm),
                          "4"(r0),
                          "5"(kptr),
                          "r"(inch)         // %12
                        : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10"
                    );
#endif // __aarch64__
#else
                    int sum0[4] = {0};
                    int sum1[4] = {0};
                    int sum2[4] = {0};
                    int sum3[4] = {0};

                    for (int q=0; q<inch; q++)
                    {
                        for (int n=0; n<4; n++)
                        {
                            sum0[n] += (int)r0[n] * kptr[n];
                            sum1[n] += (int)r0[n] * kptr[n+4];
                            sum2[n] += (int)r0[n] * kptr[n+8];
                            sum3[n] += (int)r0[n] * kptr[n+12];
                        }
                        kptr += 16;
                        r0 += 4;
                    }

                    for (int n=0; n<4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                    }
#endif // __ARM_NEON
                    output0_tm += 36;
                    output1_tm += 36;
                    output2_tm += 36;
                    output3_tm += 36;
                }
            }

            remain_outch_start += nn_outch << 2;

            for (int p=remain_outch_start; p<outch; p++)
            {
                int* output0_tm = top_blob_tm.channel(p);

                output0_tm = output0_tm + r*4;

                for (int i=0; i<tiles; i++)
                {
                    const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4);
                    const short* r0 = bottom_blob_tm.channel(tiles*r+i);
#if __ARM_NEON
#if __aarch64__
                    asm volatile(
                        // inch loop
                        "eor    v0.16b, v0.16b, v0.16b    \n"

                        "mov    w4, %w6                   \n"

                        "0:                               \n" // for (int q=0; q<inch; q++)
                        "ld1    {v8.4h}, [%1]             \n" // _r0 = vld1_s16(r0);  // input inch0
                        "ld1    {v9.4h}, [%2]             \n" // _k0 = vld1q_s16(kptr);
                        "add    %1, %1, #8                \n"
                        "add    %2, %2, #8                \n"

                        "subs   w4, w4, #1                \n"

                        "smlal  v0.4s, v8.4h, v9.4h       \n" // sum0 += (a00-a03) * (k00-k03)

                        "bne    0b                        \n" // end for

                        "st1    {v0.4s}, [%0]             \n" // store the result to memory

                        : "=r"(output0_tm), // %0
                          "=r"(r0),         // %1
                          "=r"(kptr)        // %2
                        : "0"(output0_tm),
                          "1"(r0),
                          "2"(kptr),
                          "r"(inch)         // %6
                        : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9"
                    );
#else
                    asm volatile(
                        // inch loop
                        "vmov.s32   q0, #0             \n"

                        "mov        r4, %6             \n"

                        "0:                            \n" // for (int q=0; q<inch; q++)
                        "vld1.s16   {d16}, [%1]        \n" // _r0 = vld1_s16(r0);  // input inch0
                        "add        %1, #8             \n"
                        "vld1.s16   {d18}, [%2]        \n" // _k0 = vld1q_s16(kptr);
                        "add        %2, #8             \n"

                        "vmlal.s16  q0, d16, d18       \n" // sum0 += (a00-a03) * (k00-k03)

                        "subs       r4, r4, #1         \n"
                        "bne        0b                 \n" // end for

                        "vst1.s32   {d0-d1}, [%0]      \n" // store the result to memory

                        : "=r"(output0_tm), // %0
                          "=r"(r0),         // %1
                          "=r"(kptr)        // %2
                        : "0"(output0_tm),
                          "1"(r0),
                          "2"(kptr),
                          "r"(inch)         // %6
                        : "cc", "memory", "r4", "q0", "q8", "q9"
                    );
#endif // __aarch64__
#else // __ARM_NEON
                    int sum0[4] = {0};

                    for (int q=0; q<inch; q++)
                    {
                        for (int n=0; n<4; n++)
                        {
                            sum0[n] += (int)r0[n] * kptr[n];
                        }
                        kptr += 4;
                        r0 += 4;
                    }

                    for (int n=0; n<4; n++)
                    {
                        output0_tm[n] = sum0[n];
                    }
#endif // __ARM_NEON
                    output0_tm += 36;
                }
            }

            // Reference implementation kept for documentation/debugging.
            // for (int p=0; p<outch; p++)
            // {
            //     Mat out0_tm = top_blob_tm.channel(p);
            //     const Mat kernel0_tm = kernel_tm.channel(p);
            //     for (int i=0; i<tiles; i++)
            //     {
            //         int* output0_tm = out0_tm.row<int>(i);
            //         int sum0[36] = {0};
            //         for (int q=0; q<inch; q++)
            //         {
            //             const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
            //             const short* k0 = kernel0_tm.row<short>(q);
            //             for (int n=0; n<36; n++)
            //             {
            //                 sum0[n] += (int)r0[n] * k0[n];
            //             }
            //         }
            //         for (int n=0; n<36; n++)
            //         {
            //             output0_tm[n] = sum0[n];
            //         }
            //     }
            // }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    {
        // AT
        // const float itm[4][6] = {
        //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };

        // 0 = r00 + r01 + r02 + r03 + r04
        // 1 = r01 - r02 + 2 * (r03 - r04)
        // 2 = r01 + r02 + 4 * (r03 + r04)
        // 3 = r01 - r02 + 8 * (r03 - r04) + r05

        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm/6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/6;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=0; p<outch; p++)
        {
            int* out_tile = top_blob_tm.channel(p);
            int* outRow0 = top_blob_bordered.channel(p);
            int* outRow1 = outRow0 + outw;
            int* outRow2 = outRow0 + outw * 2;
            int* outRow3 = outRow0 + outw * 3;

            for (int j=0; j<nColBlocks; j++)
            {
                for(int i=0; i<nRowBlocks; i++)
                {
#if __ARM_NEON
                    int32x4_t _s0, _s1, _s2, _s3, _s4, _s5;
                    int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n;
                    int32x4_t _w0, _w1, _w2, _w3;
                    int32x2_t _w0n, _w1n, _w2n, _w3n;
                    int32x4_t _d0, _d1, _d2, _d3, _d4, _d5;
                    int32x4_t _o0, _o1, _o2, _o3;
                    // load: six rows of 6 ints each (4 + 2 per row)
                    _s0 = vld1q_s32(out_tile);
                    _s0n = vld1_s32(out_tile+4);
                    _s1 = vld1q_s32(out_tile+6);
                    _s1n = vld1_s32(out_tile+10);
                    _s2 = vld1q_s32(out_tile+12);
                    _s2n = vld1_s32(out_tile+16);
                    _s3 = vld1q_s32(out_tile+18);
                    _s3n = vld1_s32(out_tile+22);
                    _s4 = vld1q_s32(out_tile+24);
                    _s4n = vld1_s32(out_tile+28);
                    _s5 = vld1q_s32(out_tile+30);
                    _s5n = vld1_s32(out_tile+34);
                    // w = A_T * W
                    int32x2_t _tp0 = {-1, 2};
                    int32x2_t _tp1 = {-2, 4};
                    int32x2_t _tp2 = {8, -8};

                    _w0 = vaddq_s32(_s0, _s1);
                    _w0n = vadd_s32(_s0n, _s1n);
                    _w0 = vaddq_s32(_w0, _s2);
                    _w0n = vadd_s32(_w0n, _s2n);
                    _w0 = vaddq_s32(_w0, _s3);
                    _w0n = vadd_s32(_w0n, _s3n);
                    _w0 = vaddq_s32(_w0, _s4);
                    _w0n = vadd_s32(_w0n, _s4n);

                    _w1 = vsubq_s32(_s1, _s2);
                    _w1n = vsub_s32(_s1n, _s2n);
                    _w1 = vmlaq_lane_s32(_w1, _s3, _tp0, 1);
                    _w1n = vmla_lane_s32(_w1n, _s3n, _tp0, 1);
                    _w1 = vmlaq_lane_s32(_w1, _s4, _tp1, 0);
                    _w1n = vmla_lane_s32(_w1n, _s4n, _tp1, 0);

                    _w2 = vaddq_s32(_s1, _s2);
                    _w2n = vadd_s32(_s1n, _s2n);
                    _w2 = vmlaq_lane_s32(_w2, _s3, _tp1, 1);
                    _w2n = vmla_lane_s32(_w2n, _s3n, _tp1, 1);
                    _w2 = vmlaq_lane_s32(_w2, _s4, _tp1, 1);
                    _w2n = vmla_lane_s32(_w2n, _s4n, _tp1, 1);

                    _w3 = vsubq_s32(_s1, _s2);
                    _w3n = vsub_s32(_s1n, _s2n);
                    _w3 = vmlaq_lane_s32(_w3, _s3, _tp2, 0);
                    _w3n = vmla_lane_s32(_w3n, _s3n, _tp2, 0);
                    _w3 = vmlaq_lane_s32(_w3, _s4, _tp2, 1);
                    _w3n = vmla_lane_s32(_w3n, _s4n, _tp2, 1);
                    _w3 = vaddq_s32(_w3, _s5);
                    _w3n = vadd_s32(_w3n, _s5n);
                    // transpose w to w_t
                    {
                        _d0[0] = _w0[0]; _d0[1] = _w1[0]; _d0[2] = _w2[0]; _d0[3] = _w3[0];
                        _d1[0] = _w0[1]; _d1[1] = _w1[1]; _d1[2] = _w2[1]; _d1[3] = _w3[1];
                        _d2[0] = _w0[2]; _d2[1] = _w1[2]; _d2[2] = _w2[2]; _d2[3] = _w3[2];
                        _d3[0] = _w0[3]; _d3[1] = _w1[3]; _d3[2] = _w2[3]; _d3[3] = _w3[3];
                        _d4[0] = _w0n[0]; _d4[1] = _w1n[0]; _d4[2] = _w2n[0]; _d4[3] = _w3n[0];
                        _d5[0] = _w0n[1]; _d5[1] = _w1n[1]; _d5[2] = _w2n[1]; _d5[3] = _w3n[1];
                    }
                    // Y = A_T * w_t
                    _o0 = vaddq_s32(_d0, _d1);
                    _o0 = vaddq_s32(_o0, _d2);
                    _o0 = vaddq_s32(_o0, _d3);
                    _o0 = vaddq_s32(_o0, _d4);

                    _o1 = vsubq_s32(_d1, _d2);
                    _o1 = vmlaq_lane_s32(_o1, _d3, _tp0, 1);
                    _o1 = vmlaq_lane_s32(_o1, _d4, _tp1, 0);

                    _o2 = vaddq_s32(_d1, _d2);
                    _o2 = vmlaq_lane_s32(_o2, _d3, _tp1, 1);
                    _o2 = vmlaq_lane_s32(_o2, _d4, _tp1, 1);

                    _o3 = vsubq_s32(_d1, _d2);
                    _o3 = vmlaq_lane_s32(_o3, _d3, _tp2, 0);
                    _o3 = vmlaq_lane_s32(_o3, _d4, _tp2, 1);
                    _o3 = vaddq_s32(_o3, _d5);
                    // save to top blob tm
                    // NOTE(review): /576 undoes the integer scaling introduced
                    // by the B^T/A^T transforms — confirm against derivation.
                    for (int n = 0; n < 4; n++)
                    {
                        outRow0[n] = _o0[n] / 576;
                        outRow1[n] = _o1[n] / 576;
                        outRow2[n] = _o2[n] / 576;
                        outRow3[n] = _o3[n] / 576;
                    }
#else
                    int s0[6],s1[6],s2[6],s3[6],s4[6],s5[6];
                    int w0[6],w1[6],w2[6],w3[6];
                    int d0[4],d1[4],d2[4],d3[4],d4[4],d5[4];
                    int o0[4],o1[4],o2[4],o3[4];
                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n+ 6];
                        s2[n] = out_tile[n+12];
                        s3[n] = out_tile[n+18];
                        s4[n] = out_tile[n+24];
                        s5[n] = out_tile[n+30];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
                        w1[n] = s1[n] - s2[n] + 2*s3[n] - 2*s4[n];
                        w2[n] = s1[n] + s2[n] + 4*s3[n] + 4*s4[n];
                        w3[n] = s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + s5[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0];
                        d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1];
                        d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2];
                        d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3];
                        d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4];
                        d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 4; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
                        o1[n] = d1[n] - d2[n] + 2*d3[n] - 2*d4[n];
                        o2[n] = d1[n] + d2[n] + 4*d3[n] + 4*d4[n];
                        o3[n] = d1[n] - d2[n] + 8*d3[n] - 8*d4[n] + d5[n];
                    }
                    // save to top blob tm
                    for (int n = 0; n < 4; n++)
                    {
                        outRow0[n] = o0[n] / 576;
                        outRow1[n] = o1[n] / 576;
                        outRow2[n] = o2[n] / 576;
                        outRow3[n] = o3[n] / 576;
                    }
#endif // __ARM_NEON
                    out_tile += 36;

                    outRow0 += 4;
                    outRow1 += 4;
                    outRow2 += 4;
                    outRow3 += 4;
                }

                // Each tile wrote 4 output rows; skip over the other 3.
                outRow0 += outw * 3;
                outRow1 += outw * 3;
                outRow2 += outw * 3;
                outRow3 += outw * 3;
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads);
}

static void
conv3x3s1_winograd43_dequant_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Mat &_bias, std::vector<float> scales_dequant, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; // pad to 4n+2, winograd F(4,3) Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4, inch, tiles*9, 2u, opt.workspace_allocator); // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q=0; q<inch; q++) { const signed char* img = bottom_blob_bordered.channel(q); for (int j = 0; j < nColBlocks; j++) { const signed char* r0 = img + w * j * 4; const signed char* r1 = r0 + w; const signed char* r2 = r1 + w; const signed char* r3 = r2 + w; const signed char* r4 = r3 + w; const signed char* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { short* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row<short>(q); short* out_tm1 = 
bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row<short>(q); short* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row<short>(q); short* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row<short>(q); short* out_tm4 = bottom_blob_tm.channel(tiles*4+j*nRowBlocks+i).row<short>(q); short* out_tm5 = bottom_blob_tm.channel(tiles*5+j*nRowBlocks+i).row<short>(q); short* out_tm6 = bottom_blob_tm.channel(tiles*6+j*nRowBlocks+i).row<short>(q); short* out_tm7 = bottom_blob_tm.channel(tiles*7+j*nRowBlocks+i).row<short>(q); short* out_tm8 = bottom_blob_tm.channel(tiles*8+j*nRowBlocks+i).row<short>(q); #if __ARM_NEON int8x8_t _d0, _d1, _d2, _d3, _d4, _d5; int16x8_t _w0, _w1, _w2, _w3, _w4, _w5; int16x8_t _t0, _t1, _t2, _t3, _t4, _t5; int16x8_t _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = vld1_s8(r0); _d1 = vld1_s8(r1); _d2 = vld1_s8(r2); _d3 = vld1_s8(r3); _d4 = vld1_s8(r4); _d5 = vld1_s8(r5); int8x8_t _1_n = vdup_n_s8(-1); int8x8_t _2_p = vdup_n_s8(2); int8x8_t _2_n = vdup_n_s8(-2); int8x8_t _4_p = vdup_n_s8(4); int8x8_t _4_n = vdup_n_s8(-4); int8x8_t _5_n = vdup_n_s8(-5); int16x8_t _1_n_s16 = vdupq_n_s16(-1); int16x8_t _2_p_s16 = vdupq_n_s16(2); int16x8_t _2_n_s16 = vdupq_n_s16(-2); int16x8_t _4_p_s16 = vdupq_n_s16(4); int16x8_t _4_n_s16 = vdupq_n_s16(-4); int16x8_t _5_n_s16 = vdupq_n_s16(-5); // w = B_t * d _w0 = vmull_s8(_d0, _4_p); _w0 = vmlal_s8(_w0, _d2, _5_n); _w0 = vaddw_s8(_w0, _d4); _w1 = vmull_s8(_d1, _4_n); _w1 = vmlal_s8(_w1, _d2, _4_n); _w1 = vaddw_s8(_w1, _d3); _w1 = vaddw_s8(_w1, _d4); _w2 = vmull_s8(_d1, _4_p); _w2 = vmlal_s8(_w2, _d2, _4_n); _w2 = vmlal_s8(_w2, _d3, _1_n); _w2 = vaddw_s8(_w2, _d4); _w3 = vmull_s8(_d1, _2_n); _w3 = vmlal_s8(_w3, _d2, _1_n); _w3 = vmlal_s8(_w3, _d3, _2_p); _w3 = vaddw_s8(_w3, _d4); _w4 = vmull_s8(_d1, _2_p); _w4 = vmlal_s8(_w4, _d2, _1_n); _w4 = vmlal_s8(_w4, _d3, _2_n); _w4 = vaddw_s8(_w4, _d4); _w5 = vmull_s8(_d1, _4_p); _w5 = vmlal_s8(_w5, _d3, _5_n); _w5 = vaddw_s8(_w5, _d5); // transpose d to d_t { 
_t0[0]=_w0[0]; _t1[0]=_w0[1]; _t2[0]=_w0[2]; _t3[0]=_w0[3]; _t4[0]=_w0[4]; _t5[0]=_w0[5]; _t0[1]=_w1[0]; _t1[1]=_w1[1]; _t2[1]=_w1[2]; _t3[1]=_w1[3]; _t4[1]=_w1[4]; _t5[1]=_w1[5]; _t0[2]=_w2[0]; _t1[2]=_w2[1]; _t2[2]=_w2[2]; _t3[2]=_w2[3]; _t4[2]=_w2[4]; _t5[2]=_w2[5]; _t0[3]=_w3[0]; _t1[3]=_w3[1]; _t2[3]=_w3[2]; _t3[3]=_w3[3]; _t4[3]=_w3[4]; _t5[3]=_w3[5]; _t0[4]=_w4[0]; _t1[4]=_w4[1]; _t2[4]=_w4[2]; _t3[4]=_w4[3]; _t4[4]=_w4[4]; _t5[4]=_w4[5]; _t0[5]=_w5[0]; _t1[5]=_w5[1]; _t2[5]=_w5[2]; _t3[5]=_w5[3]; _t4[5]=_w5[4]; _t5[5]=_w5[5]; } // d = B_t * d_t _n0 = vmulq_s16(_t0, _4_p_s16); _n0 = vmlaq_s16(_n0, _t2, _5_n_s16); _n0 = vaddq_s16(_n0, _t4); _n1 = vmulq_s16(_t1, _4_n_s16); _n1 = vmlaq_s16(_n1, _t2, _4_n_s16); _n1 = vaddq_s16(_n1, _t3); _n1 = vaddq_s16(_n1, _t4); _n2 = vmulq_s16(_t1, _4_p_s16); _n2 = vmlaq_s16(_n2, _t2, _4_n_s16); _n2 = vmlaq_s16(_n2, _t3, _1_n_s16); _n2 = vaddq_s16(_n2, _t4); _n3 = vmulq_s16(_t1, _2_n_s16); _n3 = vmlaq_s16(_n3, _t2, _1_n_s16); _n3 = vmlaq_s16(_n3, _t3, _2_p_s16); _n3 = vaddq_s16(_n3, _t4); _n4 = vmulq_s16(_t1, _2_p_s16); _n4 = vmlaq_s16(_n4, _t2, _1_n_s16); _n4 = vmlaq_s16(_n4, _t3, _2_n_s16); _n4 = vaddq_s16(_n4, _t4); _n5 = vmulq_s16(_t1, _4_p_s16); _n5 = vmlaq_s16(_n5, _t3, _5_n_s16); _n5 = vaddq_s16(_n5, _t5); // save to out_tm out_tm0[0]=_n0[0];out_tm0[1]=_n0[1];out_tm0[2]=_n0[2];out_tm0[3]=_n0[3]; out_tm1[0]=_n0[4];out_tm1[1]=_n0[5];out_tm1[2]=_n1[0];out_tm1[3]=_n1[1]; out_tm2[0]=_n1[2];out_tm2[1]=_n1[3];out_tm2[2]=_n1[4];out_tm2[3]=_n1[5]; out_tm3[0]=_n2[0];out_tm3[1]=_n2[1];out_tm3[2]=_n2[2];out_tm3[3]=_n2[3]; out_tm4[0]=_n2[4];out_tm4[1]=_n2[5];out_tm4[2]=_n3[0];out_tm4[3]=_n3[1]; out_tm5[0]=_n3[2];out_tm5[1]=_n3[3];out_tm5[2]=_n3[4];out_tm5[3]=_n3[5]; out_tm6[0]=_n4[0];out_tm6[1]=_n4[1];out_tm6[2]=_n4[2];out_tm6[3]=_n4[3]; out_tm7[0]=_n4[4];out_tm7[1]=_n4[5];out_tm7[2]=_n5[0];out_tm7[3]=_n5[1]; out_tm8[0]=_n5[2];out_tm8[1]=_n5[3];out_tm8[2]=_n5[4];out_tm8[3]=_n5[5]; #else short d0[6],d1[6],d2[6],d3[6],d4[6],d5[6]; 
short w0[6],w1[6],w2[6],w3[6],w4[6],w5[6]; short t0[6],t1[6],t2[6],t3[6],t4[6],t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] = 4*d0[n] - 5*d2[n] + d4[n]; w1[n] = -4*d1[n] - 4*d2[n] + d3[n] + d4[n]; w2[n] = 4*d1[n] - 4*d2[n] - d3[n] + d4[n]; w3[n] = -2*d1[n] - d2[n] + 2*d3[n] + d4[n]; w4[n] = 2*d1[n] - d2[n] - 2*d3[n] + d4[n]; w5[n] = 4*d1[n] - 5*d3[n] + d5[n]; } // transpose d to d_t { t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t4[0]=w0[4]; t5[0]=w0[5]; t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t4[1]=w1[4]; t5[1]=w1[5]; t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t4[2]=w2[4]; t5[2]=w2[5]; t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; t4[3]=w3[4]; t5[3]=w3[5]; t0[4]=w4[0]; t1[4]=w4[1]; t2[4]=w4[2]; t3[4]=w4[3]; t4[4]=w4[4]; t5[4]=w4[5]; t0[5]=w5[0]; t1[5]=w5[1]; t2[5]=w5[2]; t3[5]=w5[3]; t4[5]=w5[4]; t5[5]=w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4*t0[n] - 5*t2[n] + t4[n]; d1[n] = - 4*t1[n] - 4*t2[n] + t3[n] + t4[n]; d2[n] = 4*t1[n] - 4*t2[n] - t3[n] + t4[n]; d3[n] = - 2*t1[n] - t2[n] + 2*t3[n] + t4[n]; d4[n] = 2*t1[n] - t2[n] - 2*t3[n] + t4[n]; d5[n] = 4*t1[n] - 5*t3[n] + t5[n]; } // save to out_tm { out_tm0[0]=d0[0];out_tm0[1]=d0[1];out_tm0[2]=d0[2];out_tm0[3]=d0[3]; out_tm1[0]=d0[4];out_tm1[1]=d0[5];out_tm1[2]=d1[0];out_tm1[3]=d1[1]; out_tm2[0]=d1[2];out_tm2[1]=d1[3];out_tm2[2]=d1[4];out_tm2[3]=d1[5]; out_tm3[0]=d2[0];out_tm3[1]=d2[1];out_tm3[2]=d2[2];out_tm3[3]=d2[3]; out_tm4[0]=d2[4];out_tm4[1]=d2[5];out_tm4[2]=d3[0];out_tm4[3]=d3[1]; out_tm5[0]=d3[2];out_tm5[1]=d3[3];out_tm5[2]=d3[4];out_tm5[3]=d3[5]; out_tm6[0]=d4[0];out_tm6[1]=d4[1];out_tm6[2]=d4[2];out_tm6[3]=d4[3]; out_tm7[0]=d4[4];out_tm7[1]=d4[5];out_tm7[2]=d5[0];out_tm7[3]=d5[1]; out_tm8[0]=d5[2];out_tm8[1]=d5[3];out_tm8[2]=d5[4];out_tm8[3]=d5[5]; } #endif // __ARM_NEON r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 
+= 4; r5 += 4; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r=0; r<9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); int* output4_tm = top_blob_tm.channel(p+4); int* output5_tm = top_blob_tm.channel(p+5); int* output6_tm = top_blob_tm.channel(p+6); int* output7_tm = top_blob_tm.channel(p+7); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; output4_tm = output4_tm + r*4; output5_tm = output5_tm + r*4; output6_tm = output6_tm + r*4; output7_tm = output7_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "mov w4, %w20 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0); "ld1 {v8.4h}, [%8] \n" "ld1 {v9.4h, v10.4h}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, %9, #16 \n" "ld1 {v11.4h, v12.4h}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, %9, #16 \n" "ld1 {v13.4h, v14.4h}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, %9, #16 \n" "ld1 
{v15.4h, v16.4h}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %8, %8, #8 \n" "add %9, %9, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63) "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // "st1 {v4.4s}, [%4] \n" // "st1 {v5.4s}, [%5] \n" // "st1 {v6.4s}, [%6] \n" // "st1 {v7.4s}, [%7] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "vmov.s32 q4, #0 \n" "vmov.s32 q5, #0 \n" "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "mov r4, %20 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%8]! 
\n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, #16 \n" "vld1.s16 {d20-d21}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, #16 \n" "vld1.s16 {d22-d23}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, #16 \n" "vld1.s16 {d24-d25}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %9, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43) "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53) "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63) "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" "vst1.s32 {d8-d9}, [%4] \n" "vst1.s32 {d10-d11}, [%5] \n" "vst1.s32 {d12-d13}, [%6] \n" "vst1.s32 {d14-d15}, [%7] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int sum4[4] = {0}; int sum5[4] = {0}; int sum6[4] = {0}; int sum7[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += 
(int)r0[n] * kptr[n+12]; sum4[n] += (int)r0[n] * kptr[n+16]; sum5[n] += (int)r0[n] * kptr[n+20]; sum6[n] += (int)r0[n] * kptr[n+24]; sum7[n] += (int)r0[n] * kptr[n+28]; } kptr += 32; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "mov w4, %w12 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%4] \n" "ld1 {v9.4h, v10.4h}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, %5, #16 \n" "ld1 {v11.4h, v12.4h}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %4, %4, #8 \n" "add %5, %5, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // 
store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "mov r4, %12 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, #16 \n" "vld1.s16 {d20-d21}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %5, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += (int)r0[n] * kptr[n+12]; } kptr += 16; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; 
output3_tm[n] = sum3[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p=remain_outch_start; p<outch; p++) { int* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "mov w4, %w6 \n" "0: \n" // for (int q=0; q<inch; q++) "ld1 {v8.4h}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %1, %1, #8 \n" "add %2, %2, #8 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "mov r4, %6 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "add %1, #8 \n" "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %2, #8 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q8", "q9" ); #endif // __aarch64__ #else // __ARM_NEON int sum0[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; } #endif // __ARM_NEON output0_tm += 36; } } // for (int p=0; p<outch; p++) // { // Mat out0_tm = 
top_blob_tm.channel(p); // const Mat kernel0_tm = kernel_tm.channel(p); // for (int i=0; i<tiles; i++) // { // int* output0_tm = out0_tm.row<int>(i); // int sum0[36] = {0}; // for (int q=0; q<inch; q++) // { // const short* r0 = bottom_blob_tm.channel(q).row<short>(i); // const short* k0 = kernel0_tm.row<short>(q); // for (int n=0; n<36; n++) // { // sum0[n] += (int)r0[n] * k0[n]; // } // } // for (int n=0; n<36; n++) // { // output0_tm[n] = sum0[n]; // } // } // } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { int* out_tile = top_blob_tm.channel(p); float* outRow0 = top_blob_bordered.channel(p); float* outRow1 = outRow0 + outw; float* outRow2 = outRow0 + outw * 2; float* outRow3 = outRow0 + outw * 3; const float bias0 = bias ? 
bias[p] : 0.f; const float scale_dequant0 = scales_dequant[p]; const float scale0 = scale_dequant0 / 576.0; for (int j=0; j<nColBlocks; j++) { for(int i=0; i<nRowBlocks; i++) { #if __ARM_NEON int32x4_t _s0, _s1, _s2, _s3, _s4, _s5; int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n; int32x4_t _w0, _w1, _w2, _w3; int32x2_t _w0n, _w1n, _w2n, _w3n; int32x4_t _d0, _d1, _d2, _d3, _d4, _d5; int32x4_t _o0, _o1, _o2, _o3; // load _s0 = vld1q_s32(out_tile); _s0n = vld1_s32(out_tile+4); _s1 = vld1q_s32(out_tile+6); _s1n = vld1_s32(out_tile+10); _s2 = vld1q_s32(out_tile+12); _s2n = vld1_s32(out_tile+16); _s3 = vld1q_s32(out_tile+18); _s3n = vld1_s32(out_tile+22); _s4 = vld1q_s32(out_tile+24); _s4n = vld1_s32(out_tile+28); _s5 = vld1q_s32(out_tile+30); _s5n = vld1_s32(out_tile+34); // w = A_T * W int32x2_t _tp0 = {-1, 2}; int32x2_t _tp1 = {-2, 4}; int32x2_t _tp2 = {8, -8}; _w0 = vaddq_s32(_s0, _s1); _w0n = vadd_s32(_s0n, _s1n); _w0 = vaddq_s32(_w0, _s2); _w0n = vadd_s32(_w0n, _s2n); _w0 = vaddq_s32(_w0, _s3); _w0n = vadd_s32(_w0n, _s3n); _w0 = vaddq_s32(_w0, _s4); _w0n = vadd_s32(_w0n, _s4n); _w1 = vsubq_s32(_s1, _s2); _w1n = vsub_s32(_s1n, _s2n); _w1 = vmlaq_lane_s32(_w1, _s3, _tp0, 1); _w1n = vmla_lane_s32(_w1n, _s3n, _tp0, 1); _w1 = vmlaq_lane_s32(_w1, _s4, _tp1, 0); _w1n = vmla_lane_s32(_w1n, _s4n, _tp1, 0); _w2 = vaddq_s32(_s1, _s2); _w2n = vadd_s32(_s1n, _s2n); _w2 = vmlaq_lane_s32(_w2, _s3, _tp1, 1); _w2n = vmla_lane_s32(_w2n, _s3n, _tp1, 1); _w2 = vmlaq_lane_s32(_w2, _s4, _tp1, 1); _w2n = vmla_lane_s32(_w2n, _s4n, _tp1, 1); _w3 = vsubq_s32(_s1, _s2); _w3n = vsub_s32(_s1n, _s2n); _w3 = vmlaq_lane_s32(_w3, _s3, _tp2, 0); _w3n = vmla_lane_s32(_w3n, _s3n, _tp2, 0); _w3 = vmlaq_lane_s32(_w3, _s4, _tp2, 1); _w3n = vmla_lane_s32(_w3n, _s4n, _tp2, 1); _w3 = vaddq_s32(_w3, _s5); _w3n = vadd_s32(_w3n, _s5n); // transpose w to w_t { _d0[0] = _w0[0]; _d0[1] = _w1[0]; _d0[2] = _w2[0]; _d0[3] = _w3[0]; _d1[0] = _w0[1]; _d1[1] = _w1[1]; _d1[2] = _w2[1]; _d1[3] = _w3[1]; _d2[0] = _w0[2]; 
_d2[1] = _w1[2]; _d2[2] = _w2[2]; _d2[3] = _w3[2]; _d3[0] = _w0[3]; _d3[1] = _w1[3]; _d3[2] = _w2[3]; _d3[3] = _w3[3]; _d4[0] = _w0n[0]; _d4[1] = _w1n[0]; _d4[2] = _w2n[0]; _d4[3] = _w3n[0]; _d5[0] = _w0n[1]; _d5[1] = _w1n[1]; _d5[2] = _w2n[1]; _d5[3] = _w3n[1]; } // Y = A_T * w_t _o0 = vaddq_s32(_d0, _d1); _o0 = vaddq_s32(_o0, _d2); _o0 = vaddq_s32(_o0, _d3); _o0 = vaddq_s32(_o0, _d4); _o1 = vsubq_s32(_d1, _d2); _o1 = vmlaq_lane_s32(_o1, _d3, _tp0, 1); _o1 = vmlaq_lane_s32(_o1, _d4, _tp1, 0); _o2 = vaddq_s32(_d1, _d2); _o2 = vmlaq_lane_s32(_o2, _d3, _tp1, 1); _o2 = vmlaq_lane_s32(_o2, _d4, _tp1, 1); _o3 = vsubq_s32(_d1, _d2); _o3 = vmlaq_lane_s32(_o3, _d3, _tp2, 0); _o3 = vmlaq_lane_s32(_o3, _d4, _tp2, 1); _o3 = vaddq_s32(_o3, _d5); // save to top blob tm float32x4_t _scale0 = vdupq_n_f32(scale0); float32x4_t _out0_f32 = vdupq_n_f32(bias0); float32x4_t _out1_f32 = vdupq_n_f32(bias0); float32x4_t _out2_f32 = vdupq_n_f32(bias0); float32x4_t _out3_f32 = vdupq_n_f32(bias0); _out0_f32 = vmlaq_f32(_out0_f32, vcvtq_f32_s32(_o0), _scale0); _out1_f32 = vmlaq_f32(_out1_f32, vcvtq_f32_s32(_o1), _scale0); _out2_f32 = vmlaq_f32(_out2_f32, vcvtq_f32_s32(_o2), _scale0); _out3_f32 = vmlaq_f32(_out3_f32, vcvtq_f32_s32(_o3), _scale0); vst1q_f32(outRow0, _out0_f32); vst1q_f32(outRow1, _out1_f32); vst1q_f32(outRow2, _out2_f32); vst1q_f32(outRow3, _out3_f32); #else int s0[6],s1[6],s2[6],s3[6],s4[6],s5[6]; int w0[6],w1[6],w2[6],w3[6]; int d0[4],d1[4],d2[4],d3[4],d4[4],d5[4]; int o0[4],o1[4],o2[4],o3[4]; // load for (int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n+ 6]; s2[n] = out_tile[n+12]; s3[n] = out_tile[n+18]; s4[n] = out_tile[n+24]; s5[n] = out_tile[n+30]; } // w = A_T * W for (int n = 0; n < 6; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2*s3[n] - 2*s4[n]; w2[n] = s1[n] + s2[n] + 4*s3[n] + 4*s4[n]; w3[n] = s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + s5[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = 
w3[0]; // NOTE(review): completes "d0[3] = w3[0];" — the statement is split across the chunk boundary
                    d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1];
                    d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2];
                    d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3];
                    d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4];
                    d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5];
                }
                // Y = A_T * w_t
                for (int n = 0; n < 4; n++)
                {
                    o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
                    o1[n] = d1[n] - d2[n] + 2*d3[n] - 2*d4[n];
                    o2[n] = d1[n] + d2[n] + 4*d3[n] + 4*d4[n];
                    o3[n] = d1[n] - d2[n] + 8*d3[n] - 8*d4[n] + d5[n];
                }
                // save to top blob tm
                // dequantize the int32 accumulators and add the per-channel bias
                for (int n = 0; n < 4; n++)
                {
                    outRow0[n] = (float)o0[n] * scale0 + bias0;
                    outRow1[n] = (float)o1[n] * scale0 + bias0;
                    outRow2[n] = (float)o2[n] * scale0 + bias0;
                    outRow3[n] = (float)o3[n] * scale0 + bias0;
                }
#endif // __ARM_NEON
                // advance to the next 6x6 tile (36 values) and the next 4 output columns
                out_tile += 36;
                outRow0 += 4;
                outRow1 += 4;
                outRow2 += 4;
                outRow3 += 4;
            }
            // each tile row produced 4 output rows; skip the 3 rows already written
            outRow0 += outw * 3;
            outRow1 += outw * 3;
            outRow2 += outw * 3;
            outRow3 += outw * 3;
        }
    }
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads);
}

// Repack int8 3x3 kernel weights for conv3x3s2_packed_int8_neon.
// Output channels are processed in groups of 8: within a group, the 9 kernel
// taps of each input channel are interleaved across the 8 output channels
// (8 bytes per tap), so the compute loop can load one tap for 8 channels at once.
// Leftover output channels (outch % 8) are stored one channel per plane, 9
// consecutive taps per input channel.
// _kernel:   flat int8 weights, laid out [outch][inch][9]
// kernel_tm: destination, created here as 8*9 x inch x (outch/8 + outch%8), 1 byte/element
static void conv3x3s2_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    // outch/8 planes for the 8-channel groups + one plane per leftover channel
    kernel_tm.create(8*9, inch, outch/8 + outch%8, (size_t)1u);

    const signed char* kernel = _kernel;

    int p=0;
    for (; p+7<outch; p+=8)
    {
        // source rows for the 8 output channels of this group
        const signed char* k0 = kernel + (p+0)*inch*9;
        const signed char* k1 = kernel + (p+1)*inch*9;
        const signed char* k2 = kernel + (p+2)*inch*9;
        const signed char* k3 = kernel + (p+3)*inch*9;
        const signed char* k4 = kernel + (p+4)*inch*9;
        const signed char* k5 = kernel + (p+5)*inch*9;
        const signed char* k6 = kernel + (p+6)*inch*9;
        const signed char* k7 = kernel + (p+7)*inch*9;

        signed char* ktmp = kernel_tm.channel(p/8);

        for (int q=0; q<inch; q++)
        {
            for (int k=0; k<9; k++)
            {
                // interleave tap k of all 8 output channels
                ktmp[0] = k0[k];
                ktmp[1] = k1[k];
                ktmp[2] = k2[k];
                ktmp[3] = k3[k];
                ktmp[4] = k4[k];
                ktmp[5] = k5[k];
                ktmp[6] = k6[k];
                ktmp[7] = k7[k];
                ktmp += 8;
            }
            // next input channel of each source row
            k0 += 9;
            k1 += 9;
            k2 += 9;
            k3 += 9;
            k4 += 9;
            k5 += 9;
            k6 += 9;
            k7 += 9;
        }
    }
    // leftover output channels: plain [inch][9] copy, one plane each
    for (; p<outch; p++)
    {
        const signed char* k0 = kernel + (p+0)*inch*9;

        // planes 0..outch/8-1 hold the groups; leftovers start at p/8 + p%8
        signed char* ktmp = kernel_tm.channel(p/8 + p%8);

        for (int q=0; q<inch; q++)
        {
            for (int k=0; k<9; k++)
            {
                ktmp[k] = k0[k];
            }
            ktmp += 9;
            k0 += 9;
        }
    }
}

static void conv3x3s2_packed_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // stride-2 conv: bytes to skip from the end of one output row to the start of the next input row
    const int tailstep = w - 2*outw + w;

    int nn_outch = outch >> 3;
    int remain_outch_start = nn_outch << 3;

#pragma omp parallel for num_threads(opt.num_threads)
    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 8;

        Mat out0 = top_blob.channel(p+0);
        Mat out1 = top_blob.channel(p+1);
        Mat out2 = top_blob.channel(p+2);
        Mat out3 = top_blob.channel(p+3);
        Mat out4 = top_blob.channel(p+4);
        Mat out5 = top_blob.channel(p+5);
        Mat out6 = top_blob.channel(p+6);
        Mat out7 = top_blob.channel(p+7);

        out0.fill(0);
        out1.fill(0);
        out2.fill(0);
        out3.fill(0);
        out4.fill(0);
        out5.fill(0);
        out6.fill(0);
        out7.fill(0);

        // packed weights for this 8-channel group (see conv3x3s2_transform_kernel_int8_neon)
        const signed char* ktmp = _kernel.channel(p/8);

        for (int q=0; q<inch; q++)
        {
            int* outptr0 = out0;
            int* outptr1 = out1;
            int* outptr2 = out2;
            int* outptr3 = out3;
            int* outptr4 = out4;
            int* outptr5 = out5;
            int* outptr6 = out6;
            int* outptr7 = out7;

            const signed char* img0 = bottom_blob.channel(q);

            // three consecutive input rows feeding one output row
            const signed char* r0 = img0;
            const signed char* r1 = img0 + w;
            const signed char* r2 = img0 + w*2;

            int i = 0;

            for (; i < outh; i++)
            {
#if __ARM_NEON
#if __aarch64__
                int nn = outw >> 3;
                int remain = outw & 7;
#else
                int nn = outw >> 2;
                int remain = outw & 3;
#endif // __aarch64__
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                asm volatile(
                    "0: \n"
                    "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp
                    "ld2 {v3.8b, v4.8b}, [%9], #16 \n"//r0-r2
                    "ld2 {v5.8b, v6.8b}, [%9] \n"
                    "ld1 {v8.4s, v9.4s}, [%1]
\n"//out0 "ld1 {v10.4s, v11.4s}, [%2] \n"//out1 "ld1 {v12.4s, v13.4s}, [%3] \n"//out2 "ld1 {v14.4s, v15.4s}, [%4] \n"//out3 "ld1 {v16.4s, v17.4s}, [%5] \n"//out4 "ld1 {v18.4s, v19.4s}, [%6] \n"//out5 "ld1 {v20.4s, v21.4s}, [%7] \n"//out6 "ld1 {v22.4s, v23.4s}, [%8] \n"//out7 "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k00-k70) "sshll v1.8h, v1.8b, #0 \n"//(k01-k71) "sshll v2.8h, v2.8b, #0 \n"//(k02-k72) "sshll v3.8h, v3.8b, #0 \n"// r0 "sshll v4.8h, v4.8b, #0 \n"// r1 "sshll v7.8h, v7.8b, #0 \n"// r2 // r0 "smlal v8.4s, v3.4h, v0.h[0] \n"// out0 += (r00-r07)*k00 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n"// out1 += (r00-r07)*k10 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n"// out2 += (r00-r07)*k20 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n"// out3 += (r00-r07)*k30 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n"// out4 += (r00-r07)*k40 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n"// out5 += (r00-r07)*k50 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n"// out6 += (r00-r07)*k60 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n"// out7 += (r00-r07)*k70 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r1 "smlal v8.4s, v4.4h, v1.h[0] \n"// out0 += (r10-r17)*k01 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal v10.4s, v4.4h, v1.h[1] \n"// out1 += (r10-r17)*k11 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n"// out2 += (r10-r17)*k21 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n"// out3 += (r10-r17)*k31 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n"// out4 += (r10-r17)*k41 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n"// out5 += (r10-r17)*k51 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n"// out6 += (r10-r17)*k61 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n"// out7 += (r10-r17)*k71 "smlal2 
v23.4s, v4.8h, v1.h[7] \n" // r2 "smlal v8.4s, v7.4h, v2.h[0] \n"// out0 += (r20-r27)*k02 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n"// out1 += (r20-r27)*k12 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n"// out2 += (r20-r27)*k22 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n"// out3 += (r20-r27)*k32 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n"// out4 += (r20-r27)*k42 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n"// out5 += (r20-r27)*k52 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n"// out6 += (r20-r27)*k62 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n"// out7 += (r20-r27)*k72 "smlal2 v23.4s, v7.8h, v2.h[7] \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%10], #16 \n"//r3-r5 "ld2 {v5.8b, v6.8b}, [%10] \n" "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k03-k73) "sshll v1.8h, v1.8b, #0 \n"//(k04-k74) "sshll v2.8h, v2.8b, #0 \n"//(k05-k75) "sshll v3.8h, v3.8b, #0 \n"// r3 "sshll v4.8h, v4.8b, #0 \n"// r4 "sshll v7.8h, v7.8b, #0 \n"// r5 // r3 "smlal v8.4s, v3.4h, v0.h[0] \n"// out0 += (r30-r37)*k03 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n"// out1 += (r30-r37)*k13 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n"// out2 += (r30-r37)*k23 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n"// out3 += (r30-r37)*k33 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n"// out4 += (r30-r37)*k43 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n"// out5 += (r30-r37)*k53 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n"// out6 += (r30-r37)*k63 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n"// out7 += (r30-r37)*k73 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r4 "smlal v8.4s, v4.4h, v1.h[0] \n"// out0 += (r40-r47)*k04 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal 
v10.4s, v4.4h, v1.h[1] \n"// out1 += (r40-r47)*k14 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n"// out2 += (r40-r47)*k24 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n"// out3 += (r40-r47)*k34 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n"// out4 += (r40-r47)*k44 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n"// out5 += (r40-r47)*k54 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n"// out6 += (r40-r47)*k64 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n"// out7 += (r40-r47)*k74 "smlal2 v23.4s, v4.8h, v1.h[7] \n" // r5 "smlal v8.4s, v7.4h, v2.h[0] \n"// out0 += (r50-r57)*k05 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n"// out1 += (r50-r57)*k15 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n"// out2 += (r50-r57)*k25 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n"// out3 += (r50-r57)*k35 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n"// out4 += (r50-r57)*k45 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n"// out5 += (r50-r57)*k55 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n"// out6 += (r50-r57)*k65 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n"// out7 += (r50-r57)*k75 "smlal2 v23.4s, v7.8h, v2.h[7] \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%11], #16 \n"//r6-r8 "ld2 {v5.8b, v6.8b}, [%11] \n" "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k06-k76) "sshll v1.8h, v1.8b, #0 \n"//(k07-k77) "sshll v2.8h, v2.8b, #0 \n"//(k08-k78) "sshll v3.8h, v3.8b, #0 \n"// r6 "sshll v4.8h, v4.8b, #0 \n"// r7 "sshll v7.8h, v7.8b, #0 \n"// r8 // r6 "smlal v8.4s, v3.4h, v0.h[0] \n"// out0 += (r60-r67)*k06 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n"// out1 += (r60-r67)*k16 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n"// out2 += 
(r60-r67)*k26 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n"// out3 += (r60-r67)*k36 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n"// out4 += (r60-r67)*k46 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n"// out5 += (r60-r67)*k56 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n"// out6 += (r60-r67)*k66 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n"// out7 += (r60-r67)*k76 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r7 "smlal v8.4s, v4.4h, v1.h[0] \n"// out0 += (r70-r77)*k07 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal v10.4s, v4.4h, v1.h[1] \n"// out1 += (r70-r77)*k17 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n"// out2 += (r70-r77)*k27 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n"// out3 += (r70-r77)*k37 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n"// out4 += (r70-r77)*k47 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n"// out5 += (r70-r77)*k57 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n"// out6 += (r70-r77)*k67 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n"// out7 += (r70-r77)*k77 "smlal2 v23.4s, v4.8h, v1.h[7] \n" // r8 "smlal v8.4s, v7.4h, v2.h[0] \n"// out0 += (r80-r87)*k08 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n"// out1 += (r80-r87)*k18 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n"// out2 += (r80-r87)*k28 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n"// out3 += (r80-r87)*k38 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n"// out4 += (r80-r87)*k48 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n"// out5 += (r80-r87)*k58 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n"// out6 += (r80-r87)*k68 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n"// out7 += (r80-r87)*k78 "smlal2 v23.4s, v7.8h, 
v2.h[7] \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "st1 {v16.4s, v17.4s}, [%5], #32 \n" "st1 {v18.4s, v19.4s}, [%6], #32 \n" "st1 {v20.4s, v21.4s}, [%7], #32 \n" "st1 {v22.4s, v23.4s}, [%8], #32 \n" "subs %w0, %w0, #1 \n" "sub %12, %12, #72 \n"// reset ktmp "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); } #else // __aarch64__ if (nn > 0) { asm volatile( "0: \n" "pld [%1, #128] \n" "vld1.s32 {d16-d17}, [%1] \n"// out0 "pld [%2, #128] \n" "vld1.s32 {d18-d19}, [%2] \n"// out1 "pld [%3, #128] \n" "vld1.s32 {d20-d21}, [%3] \n"// out2 "pld [%4, #128] \n" "vld1.s32 {d22-d23}, [%4] \n"// out3 // r0 "pld [%9, #64] \n" "vld2.s8 {d8-d9}, [%9] \n"// d8(a00 a02 a04 a06 a08 a010 a012 a014), d9(a01 a03 a05 a07 a09 a011 a013 a015) "add %9, #8 \n" "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! 
\n"// d0(k00-k70) d1(k01-k71) d2(k02-k72) "pld [%5, #128] \n" "vld1.s32 {d24-d25}, [%5] \n"// out4 "pld [%6, #128] \n" "vld1.s32 {d26-d27}, [%6] \n"// out5 "vmovl.s8 q2, d2 \n"// q2(k02-k72) "vmovl.s8 q1, d1 \n"// q1(k01-k71) "vmovl.s8 q0, d0 \n"// q0(k00-k70) "vext.s8 d12, d8, d8, #1 \n"// d12(a02 a04 a06 a08 x x x x) "pld [%7, #128] \n" "vld1.s32 {d28-d29}, [%7] \n"// out6 "vmovl.s8 q5, d9 \n"// q5(a01 a03 a05 a07 a09 a011 a013 a015) d11 "vmovl.s8 q4, d8 \n"// q4(a00 a02 a04 a06 a08 a010 a012 a014) d9 "vmovl.s8 q6, d12 \n"// q6(a02 a04 a06 a08 a010 a012 a014 a016) d13 "pld [%8, #128] \n" "vld1.s32 {d30-d31}, [%8] \n"// out7 "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a00 a02 a04 a06) * k00 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a00 a02 a04 a06) * k10 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a00 a02 a04 a06) * k20 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a00 a02 a04 a06) * k30 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a00 a02 a04 a06) * k40 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a00 a02 a04 a06) * k50 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a00 a02 a04 a06) * k60 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a00 a02 a04 a06) * k70 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a01-a07) * k01 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a01-a07) * k11 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a01-a07) * k21 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a01-a07) * k31 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a01-a07) * k41 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a01-a07) * k51 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a01-a07) * k61 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a01-a07) * k71 "pld [%10, #64] \n" "vld2.s8 {d8-d9}, [%10] \n"// d8(a10 a12 a14 a16 a18 a110 a112 a114), d9(a11 a13 a15 a17 a19 a111 a113 a115) "add %10, #8 \n" "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a02-a08) * k02 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a02-a08) * k12 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a02-a08) * k22 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a02-a08) * k32 "pld [%12, #64] \n" "vld1.s8 
{d0-d2}, [%12]! \n"// d0(k03-k73) d1(k04-k74) d2(k05-k75) "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a02-a08) * k42 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a02-a08) * k52 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a02-a08) * k62 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a02-a08) * k72 // r1 "vext.s8 d12, d8, d8, #1 \n"// d12(a12 a14 a16 a18 x x x x) "vmovl.s8 q2, d2 \n"// q2(k05-k75) "vmovl.s8 q1, d1 \n"// q1(k04-k74) "vmovl.s8 q0, d0 \n"// q0(k03-k73) "vmovl.s8 q5, d9 \n"// q5(a11-a115) "vmovl.s8 q4, d8 \n"// q4(a10-a114) "vmovl.s8 q6, d12 \n"// q6(a12-a116) "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a10-a16) * k03 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a10-a16) * k13 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a10-a16) * k23 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a10-a16) * k33 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a10-a16) * k43 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a10-a16) * k53 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a10-a16) * k63 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a10-a16) * k73 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a11-a17) * k04 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a11-a17) * k14 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a11-a17) * k24 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a11-a17) * k34 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a11-a17) * k44 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a11-a17) * k54 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a11-a17) * k64 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a11-a17) * k74 "pld [%11, #64] \n" "vld2.s8 {d8-d9}, [%11] \n"// d8(a20 a22 a24 a26 a28 a210 a212 a214), d9(a21 a23 a25 a27 a29 a211 a213 a215) "add %11, #8 \n" "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a12-a18) * k05 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a12-a18) * k15 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a12-a18) * k25 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a12-a18) * k35 "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! 
\n"// d0(k06-k76) d1(k07-k77) d2(k08-k78) "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a12-a18) * k45 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a12-a18) * k55 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a12-a18) * k65 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a12-a18) * k75 // r2 "vext.s8 d12, d8, d8, #1 \n"// d12(a22 a24 a26 a28 x x x x) "vmovl.s8 q2, d2 \n"// q2(k08-k78) "vmovl.s8 q1, d1 \n"// q1(k07-k77) "vmovl.s8 q0, d0 \n"// q0(k06-k76) "vmovl.s8 q5, d9 \n"// q5(a21-a215) "vmovl.s8 q4, d8 \n"// q4(a20-a214) "vmovl.s8 q6, d12 \n"// q6(a22-a216) "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a20-a26) * k06 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a20-a26) * k16 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a20-a26) * k26 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a20-a26) * k36 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a20-a26) * k46 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a20-a26) * k56 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a20-a26) * k66 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a20-a26) * k76 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a21-a27) * k07 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a21-a27) * k17 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a21-a27) * k27 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a21-a27) * k37 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a21-a27) * k47 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a21-a27) * k57 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a21-a27) * k67 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a21-a27) * k77 "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a22-a28) * k08 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a22-a28) * k18 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a22-a28) * k28 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a22-a28) * k38 "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a22-a28) * k48 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a22-a28) * k58 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a22-a28) * k68 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a22-a28) * k78 // save s32 to memory "sub %12, %12, #72 \n" "vst1.s32 {d16-d17}, [%1]! 
\n"// out0 "vst1.s32 {d18-d19}, [%2]! \n"// out1 "vst1.s32 {d20-d21}, [%3]! \n"// out2 "vst1.s32 {d22-d23}, [%4]! \n"// out3 "subs %0, #1 \n" "vst1.s32 {d24-d25}, [%5]! \n"// out4 "vst1.s32 {d26-d27}, [%6]! \n"// out5 "vst1.s32 {d28-d29}, [%7]! \n"// out6 "vst1.s32 {d30-d31}, [%8]! \n"// out7 "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ int8x8_t _r0_s8 = vld1_s8(r0);// (a00 a01 a02 ....) int8x8_t _r1_s8 = vld1_s8(r1);// (a10 a11 a12 ....) int8x8_t _r2_s8 = vld1_s8(r2);// (a20 a21 a22 ....) 
int16x8_t _r0 = vmovl_s8(_r0_s8); int16x8_t _r1 = vmovl_s8(_r1_s8); int16x8_t _r2 = vmovl_s8(_r2_s8); int32x4_t _sum03, _sum47; _sum03 = vld1q_lane_s32(outptr0, _sum03, 0);// out0 _sum03 = vld1q_lane_s32(outptr1, _sum03, 1);// out1 _sum03 = vld1q_lane_s32(outptr2, _sum03, 2);// out2 _sum03 = vld1q_lane_s32(outptr3, _sum03, 3);// out3 _sum47 = vld1q_lane_s32(outptr4, _sum47, 0);// out4 _sum47 = vld1q_lane_s32(outptr5, _sum47, 1);// out5 _sum47 = vld1q_lane_s32(outptr6, _sum47, 2);// out6 _sum47 = vld1q_lane_s32(outptr7, _sum47, 3);// out7 // k0 - k2 int8x8_t _k0_8 = vld1_s8(ktmp); //(k00-k70) int8x8_t _k1_8 = vld1_s8(ktmp+8); //(k01-k71) int8x8_t _k2_8 = vld1_s8(ktmp+16); //(k02-k72) int16x8_t _k0 = vmovl_s8(_k0_8); int16x8_t _k1 = vmovl_s8(_k1_8); int16x8_t _k2 = vmovl_s8(_k2_8); int32x4_t _sum0 = vmull_laneq_s16(vget_low_s16(_k0), _r0, 0); int32x4_t _sum0n = vmull_laneq_s16(vget_high_s16(_k0), _r0, 0); int32x4_t _sum1 = vmull_laneq_s16(vget_low_s16(_k1), _r0, 1); int32x4_t _sum1n = vmull_laneq_s16(vget_high_s16(_k1), _r0, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r0, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r0, 2); // k3 - k5 _k0_8 = vld1_s8(ktmp+24); //(k03-k73) _k1_8 = vld1_s8(ktmp+32); //(k04-k74) _k2_8 = vld1_s8(ktmp+40); //(k05-k75) _k0 = vmovl_s8(_k0_8); _k1 = vmovl_s8(_k1_8); _k2 = vmovl_s8(_k2_8); _sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r1, 0); _sum0n = vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r1, 0); _sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r1, 1); _sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r1, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r1, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r1, 2); // k6 - k8 _k0_8 = vld1_s8(ktmp+48); //(k06-k76) _k1_8 = vld1_s8(ktmp+56); //(k07-k77) _k2_8 = vld1_s8(ktmp+64); //(k08-k78) _k0 = vmovl_s8(_k0_8); _k1 = vmovl_s8(_k1_8); _k2 = vmovl_s8(_k2_8); _sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r2, 0); _sum0n = 
vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r2, 0); _sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r2, 1); _sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r2, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r2, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r2, 2); _sum0 = vaddq_s32(_sum0, _sum1); _sum0n = vaddq_s32(_sum0n, _sum1n); _sum03 = vaddq_s32(_sum03, _sum0); _sum47 = vaddq_s32(_sum47, _sum0n); vst1q_lane_s32(outptr0, _sum03, 0); vst1q_lane_s32(outptr1, _sum03, 1); vst1q_lane_s32(outptr2, _sum03, 2); vst1q_lane_s32(outptr3, _sum03, 3); vst1q_lane_s32(outptr4, _sum47, 0); vst1q_lane_s32(outptr5, _sum47, 1); vst1q_lane_s32(outptr6, _sum47, 2); vst1q_lane_s32(outptr7, _sum47, 3); outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #else // __aarch64__ asm volatile( "pld [%8, #64] \n" "vld1.s8 {d0}, [%8] \n"// d0(a00 a01 a02 ....) "pld [%9, #64] \n" "vld1.s8 {d2}, [%9] \n"// d2(a10 a11 a12 ....) "pld [%10, #64] \n" "vld1.s8 {d4}, [%10] \n"// d4(a20 a21 a22 ....) "pld [%11, #64] \n" "vld1.s8 {d6-d8}, [%11]! \n"// d6(k00-k70) d7(k01-k71) d8(k02-k72) "vmovl.s8 q0, d0 \n"// d0(a00 a01 a02 x) "vmovl.s8 q1, d2 \n"// d2(a10 a11 a12 x) "vmovl.s8 q2, d4 \n"// d4(a20 a21 a22 x) "vmovl.s8 q5, d8 \n"// d10(k02-k32) d11(k42-k72) "vmovl.s8 q4, d7 \n"// d8(k01-k31) d9(k41-k71) "vmovl.s8 q3, d6 \n"// d6(k00-k30) d7(k40-k70) "vld1.s32 {d20[0]}, [%0] \n"// out0 q10 "vld1.s32 {d20[1]}, [%1] \n"// out1 "vld1.s32 {d21[0]}, [%2] \n"// out2 "vld1.s32 {d21[1]}, [%3] \n"// out3 "pld [%11, #64] \n" "vld1.s8 {d24-d26}, [%11]! 
\n" "vmovl.s8 q14, d26 \n"// d28(k05-k35) d29(k45-k75) "vmovl.s8 q13, d25 \n"// d26(k04-k34) d27(k44-k74) "vmovl.s8 q12, d24 \n"// d24(k03-k33) d25(k43-k73) "vld1.s32 {d22[0]}, [%4] \n"// out4 q11 "vld1.s32 {d22[1]}, [%5] \n"// out5 "vld1.s32 {d23[0]}, [%6] \n"// out6 "vld1.s32 {d23[1]}, [%7] \n"// out7 "vmull.s16 q6, d6, d0[0] \n"// a00 x (k00-k30) "vmull.s16 q7, d7, d0[0] \n"// a00 x (k40-k70) "vmull.s16 q8, d8, d0[1] \n"// a01 x (k01-k31) "vmull.s16 q9, d9, d0[1] \n"// a01 x (k41-k71) "vmlal.s16 q10, d10, d0[2] \n"// a02 x (k02-k32) "vmlal.s16 q11, d11, d0[2] \n"// a02 x (k42-k72) "pld [%11, #64] \n" "vld1.s8 {d6-d8}, [%11]! \n" "vmovl.s8 q5, d8 \n"// d10(k08-k38) d11(k48-k78) "vmovl.s8 q4, d7 \n"// d8(k07-k37) d9(k47-k77) "vmovl.s8 q3, d6 \n"// d6(k06-k36) d7(k46-k76) "vmlal.s16 q6, d24, d2[0] \n"// a10 x (k03-k33) "vmlal.s16 q7, d25, d2[0] \n"// a10 x (k43-k73) "vmlal.s16 q8, d26, d2[1] \n"// a11 x (k04-k34) "vmlal.s16 q9, d27, d2[1] \n"// a11 x (k44-k74) "vmlal.s16 q10, d28, d2[2] \n"// a12 x (k05-k35) "vmlal.s16 q11, d29, d2[2] \n"// a12 x (k45-k75) "vmlal.s16 q6, d6, d4[0] \n"// a20 x (k06-k36) "vmlal.s16 q7, d7, d4[0] \n"// a20 x (k46-k76) "vmlal.s16 q8, d8, d4[1] \n"// a21 x (k07-k37) "vmlal.s16 q9, d9, d4[1] \n"// a21 x (k47-k77) "vmlal.s16 q10, d10, d4[2] \n"// a22 x (k08-k38) "vmlal.s16 q11, d11, d4[2] \n"// a22 x (k48-k78) "vadd.s32 q8, q8, q6 \n" "vadd.s32 q9, q9, q7 \n" "sub %11, %11, #72 \n" "vadd.s32 q10, q10, q8 \n" "vadd.s32 q11, q11, q9 \n" "vst1.s32 {d20[0]}, [%0]! \n"// out0 "vst1.s32 {d20[1]}, [%1]! \n"// out1 "vst1.s32 {d21[0]}, [%2]! \n"// out2 "vst1.s32 {d21[1]}, [%3]! \n"// out3 "vst1.s32 {d22[0]}, [%4]! \n"// out4 "vst1.s32 {d22[1]}, [%5]! \n"// out5 "vst1.s32 {d23[0]}, [%6]! \n"// out6 "vst1.s32 {d23[1]}, [%7]! 
\n"// out7 : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(ktmp) // %11 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(r0), "9"(r1), "10"(r2), "11"(ktmp) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else // __ARM_NEON int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int sum4 = 0; int sum5 = 0; int sum6 = 0; int sum7 = 0; sum0 += (int)r0[0] * ktmp[0]; sum1 += (int)r0[0] * ktmp[1]; sum2 += (int)r0[0] * ktmp[2]; sum3 += (int)r0[0] * ktmp[3]; sum4 += (int)r0[0] * ktmp[4]; sum5 += (int)r0[0] * ktmp[5]; sum6 += (int)r0[0] * ktmp[6]; sum7 += (int)r0[0] * ktmp[7]; ktmp += 8; sum0 += (int)r0[1] * ktmp[0]; sum1 += (int)r0[1] * ktmp[1]; sum2 += (int)r0[1] * ktmp[2]; sum3 += (int)r0[1] * ktmp[3]; sum4 += (int)r0[1] * ktmp[4]; sum5 += (int)r0[1] * ktmp[5]; sum6 += (int)r0[1] * ktmp[6]; sum7 += (int)r0[1] * ktmp[7]; ktmp += 8; sum0 += (int)r0[2] * ktmp[0]; sum1 += (int)r0[2] * ktmp[1]; sum2 += (int)r0[2] * ktmp[2]; sum3 += (int)r0[2] * ktmp[3]; sum4 += (int)r0[2] * ktmp[4]; sum5 += (int)r0[2] * ktmp[5]; sum6 += (int)r0[2] * ktmp[6]; sum7 += (int)r0[2] * ktmp[7]; ktmp += 8; sum0 += (int)r1[0] * ktmp[0]; sum1 += (int)r1[0] * ktmp[1]; sum2 += (int)r1[0] * ktmp[2]; sum3 += (int)r1[0] * ktmp[3]; sum4 += (int)r1[0] * ktmp[4]; sum5 += (int)r1[0] * ktmp[5]; sum6 += (int)r1[0] * ktmp[6]; sum7 += (int)r1[0] * ktmp[7]; ktmp += 8; sum0 += (int)r1[1] * ktmp[0]; sum1 += (int)r1[1] * ktmp[1]; sum2 += (int)r1[1] * ktmp[2]; sum3 += (int)r1[1] * ktmp[3]; sum4 += (int)r1[1] * ktmp[4]; sum5 += (int)r1[1] * ktmp[5]; sum6 += (int)r1[1] * ktmp[6]; sum7 += (int)r1[1] * ktmp[7]; ktmp += 8; sum0 += (int)r1[2] * ktmp[0]; sum1 += (int)r1[2] * ktmp[1]; sum2 += 
(int)r1[2] * ktmp[2]; sum3 += (int)r1[2] * ktmp[3]; sum4 += (int)r1[2] * ktmp[4]; sum5 += (int)r1[2] * ktmp[5]; sum6 += (int)r1[2] * ktmp[6]; sum7 += (int)r1[2] * ktmp[7]; ktmp += 8; sum0 += (int)r2[0] * ktmp[0]; sum1 += (int)r2[0] * ktmp[1]; sum2 += (int)r2[0] * ktmp[2]; sum3 += (int)r2[0] * ktmp[3]; sum4 += (int)r2[0] * ktmp[4]; sum5 += (int)r2[0] * ktmp[5]; sum6 += (int)r2[0] * ktmp[6]; sum7 += (int)r2[0] * ktmp[7]; ktmp += 8; sum0 += (int)r2[1] * ktmp[0]; sum1 += (int)r2[1] * ktmp[1]; sum2 += (int)r2[1] * ktmp[2]; sum3 += (int)r2[1] * ktmp[3]; sum4 += (int)r2[1] * ktmp[4]; sum5 += (int)r2[1] * ktmp[5]; sum6 += (int)r2[1] * ktmp[6]; sum7 += (int)r2[1] * ktmp[7]; ktmp += 8; sum0 += (int)r2[2] * ktmp[0]; sum1 += (int)r2[2] * ktmp[1]; sum2 += (int)r2[2] * ktmp[2]; sum3 += (int)r2[2] * ktmp[3]; sum4 += (int)r2[2] * ktmp[4]; sum5 += (int)r2[2] * ktmp[5]; sum6 += (int)r2[2] * ktmp[6]; sum7 += (int)r2[2] * ktmp[7]; ktmp += 8; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; *outptr6 += sum6; *outptr7 += sum7; ktmp -= 8*9; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 8*9; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char* ktmp = _kernel.channel(p/8 + p%8); for (int q=0; q<inch; q++) { int* outptr = out; const signed char* img0 = bottom_blob.channel(q); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w*2; int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v0.8b, v1.8b}, [%5] \n"//ktmp "ld2 {v2.8b, v3.8b}, [%2], #16 \n"//r0-r2 "ld2 {v4.8b, v5.8b}, 
[%2] \n" "ld2 {v6.8b, v7.8b}, [%3], #16 \n"//r3-r5 "ld2 {v8.8b, v9.8b}, [%3] \n" "ld2 {v10.8b, v11.8b}, [%4], #16 \n"//r6-r8 "ld2 {v12.8b, v13.8b}, [%4] \n" "ld1 {v14.4s, v15.4s}, [%1] \n"//out0 "ext v4.8b, v2.8b, v4.8b, #1 \n" "ext v8.8b, v6.8b, v8.8b, #1 \n" "ext v12.8b, v10.8b, v12.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k0-k7) "sshll v1.8h, v1.8b, #0 \n"//(k8) "sshll v2.8h, v2.8b, #0 \n"// r0 "sshll v3.8h, v3.8b, #0 \n"// r1 "sshll v4.8h, v4.8b, #0 \n"// r2 "sshll v6.8h, v6.8b, #0 \n"// r3 "sshll v7.8h, v7.8b, #0 \n"// r4 "sshll v8.8h, v8.8b, #0 \n"// r5 "sshll v10.8h, v10.8b, #0 \n"// r6 "sshll v11.8h, v11.8b, #0 \n"// r7 "sshll v12.8h, v12.8b, #0 \n"// r8 // r0 "smull v16.4s, v2.4h, v0.h[0] \n"// out = r0*k0 "smull2 v17.4s, v2.8h, v0.h[0] \n" "smull v18.4s, v3.4h, v0.h[1] \n"// outn = r1*k1 "smull2 v19.4s, v3.8h, v0.h[1] \n" "smlal v16.4s, v4.4h, v0.h[2] \n"// out = r2*k2 "smlal2 v17.4s, v4.8h, v0.h[2] \n" "smlal v18.4s, v6.4h, v0.h[3] \n"// outn = r3*k3 "smlal2 v19.4s, v6.8h, v0.h[3] \n" "smlal v16.4s, v7.4h, v0.h[4] \n"// out = r4*k4 "smlal2 v17.4s, v7.8h, v0.h[4] \n" "smlal v18.4s, v8.4h, v0.h[5] \n"// outn = r5*k5 "smlal2 v19.4s, v8.8h, v0.h[5] \n" "smlal v16.4s, v10.4h, v0.h[6] \n"// out = r6*k6 "smlal2 v17.4s, v10.8h, v0.h[6] \n" "smlal v18.4s, v11.4h, v0.h[7] \n"// outn = r7*k7 "smlal2 v19.4s, v11.8h, v0.h[7] \n" "smlal v16.4s, v12.4h, v1.h[0] \n"// out = r8*k8 "smlal2 v17.4s, v12.8h, v1.h[0] \n" "add v8.4s, v16.4s, v18.4s \n" "add v9.4s, v17.4s, v19.4s \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(ktmp) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(ktmp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); } #else if (nn > 0) { asm volatile( "vld1.s8 {d0-d1}, [%5] \n"// d0(k0 - k7) d1(k8 ...) 
"vmovl.s8 q1, d1 \n"// d2(k8 ...) "vmovl.s8 q0, d0 \n"// d0(k0 - k3) d1(k4 - k7) "0: \n" "pld [%2, #192] \n" "vld2.s8 {d4-d5}, [%2]! \n"// r0 d4(a00 a02 ... a014) d5(a01 a03 ... a015) "vld2.s8 {d8-d9}, [%2] \n"// d8(a016 ....) "vld2.s8 {d10-d11}, [%3]! \n"// r1 d10(a10 a12 ... a114) d11(a11 a13 ... a115) "vld2.s8 {d14-d15}, [%3] \n"// d14(a116 ....) "vld2.s8 {d16-d17}, [%4]! \n"// r2 d16(a20 a22 ... a214) d17(a21 a23 ... a215) "vld2.s8 {d20-d21}, [%4] \n"// d20(a216 ....) "vld1.s32 {d22-d25}, [%1] \n"// q11(out0 - out3) q12(out4 - out7) "vext.s8 d8, d4, d8, #1 \n"// d8(a02 a04 ... a016) "vext.s8 d14, d10, d14, #1 \n"// d14(a12 a14 ... a116) "vext.s8 d20, d16, d20, #1 \n"// d20(a22 a24 ... a216) "vmovl.s8 q3, d5 \n"// q3(a01 a03 ... a015) "vmovl.s8 q2, d4 \n"// q2(a00 a02 ... a014) "vmovl.s8 q4, d8 \n"// q4(a02 a04 ... a016) "vmovl.s8 q6, d11 \n"// q6(a11 a13 ... a115) "vmovl.s8 q5, d10 \n"// q5(a10 a12 ... a114) "vmovl.s8 q7, d14 \n"// q7(a12 a14 ... a116) "vmovl.s8 q9, d17 \n"// q9(a21 a23 ... a215) "vmovl.s8 q8, d16 \n"// q8(a20 a22 ... a214) "vmovl.s8 q10, d20 \n"// q10(a22 a24 ... a216) "vmlal.s16 q11, d4, d0[0] \n"// k0 "vmlal.s16 q12, d5, d0[0] \n" "vmull.s16 q13, d6, d0[1] \n"// k1 "vmull.s16 q14, d7, d0[1] \n" "vmlal.s16 q11, d8, d0[2] \n"// k2 "vmlal.s16 q12, d9, d0[2] \n" "vmlal.s16 q13, d12, d1[0] \n"// k4 "vmlal.s16 q14, d13, d1[0] \n" "vmlal.s16 q11, d10, d0[3] \n"// k3 "vmlal.s16 q12, d11, d0[3] \n" "vmlal.s16 q13, d14, d1[1] \n"// k5 "vmlal.s16 q14, d15, d1[1] \n" "vmlal.s16 q11, d16, d1[2] \n"// k6 "vmlal.s16 q12, d17, d1[2] \n" "vmlal.s16 q13, d18, d1[3] \n"// k7 "vmlal.s16 q14, d19, d1[3] \n" "vmlal.s16 q11, d20, d2[0] \n"// k8 "vmlal.s16 q12, d21, d2[0] \n" "vadd.s32 q11, q11, q13 \n" "vadd.s32 q12, q12, q14 \n" "vst1.32 {d22-d25}, [%1]! 
\n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(ktmp) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(ktmp) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON if (remain > 0) { #if __ARM_NEON int8x8_t _k01234567s8 = vld1_s8(ktmp); int8x8_t _k8xxxxxxxs8 = vld1_s8(ktmp+8); int8x8_t _k34567xxxs8 = vext_s8(_k01234567s8, _k01234567s8, 3); int8x8_t _k678xxxxxs8 = vext_s8(_k01234567s8, _k8xxxxxxxs8, 6); int16x8_t _k0123_s16 = vmovl_s8(_k01234567s8); int16x8_t _k3456_s16 = vmovl_s8(_k34567xxxs8); int16x8_t _k678x_s16 = vmovl_s8(_k678xxxxxs8); #endif for (; remain>0; remain--) { #if __ARM_NEON int8x8_t _r00s8 = vld1_s8(r0); int8x8_t _r10s8 = vld1_s8(r1); int8x8_t _r20s8 = vld1_s8(r2); int16x8_t _r00s16 = vmovl_s8(_r00s8); int16x8_t _r10s16 = vmovl_s8(_r10s8); int16x8_t _r20s16 = vmovl_s8(_r20s8); int32x4_t _sum = vmull_s16(vget_low_s16(_r00s16), vget_low_s16(_k0123_s16)); _sum = vmlal_s16(_sum, vget_low_s16(_r10s16), vget_low_s16(_k3456_s16)); _sum = vmlal_s16(_sum, vget_low_s16(_r20s16), vget_low_s16(_k678x_s16)); _sum = vsetq_lane_s32(*outptr, _sum, 3); #if __aarch64__ *outptr = vaddvq_s32(_sum); #else int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum)); _ss = vpadd_s32(_ss, _ss); *outptr = vget_lane_s32(_ss, 0); #endif // __aarch64__ #else int sum = 0; sum += (int)r0[0] * ktmp[0]; sum += (int)r0[1] * ktmp[1]; sum += (int)r0[2] * ktmp[2]; sum += (int)r1[0] * ktmp[3]; sum += (int)r1[1] * ktmp[4]; sum += (int)r1[2] * ktmp[5]; sum += (int)r2[0] * ktmp[6]; sum += (int)r2[1] * ktmp[7]; sum += (int)r2[2] * ktmp[8]; *outptr += sum; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; outptr++; } } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 9; } } } static void conv3x3s1_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& 
opt) {
    // 3x3 stride-1 int8 convolution entry point: no dedicated NEON 3x3s1
    // int8 kernel is used here — delegate to the generic im2col + sgemm
    // int8 path with a 3x3 kernel and unit stride.
    int kernel_w = 3;
    int kernel_h = 3;

    int stride_w = 1;
    int stride_h = 1;

    conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, opt);
}

// 3x3 stride-2 int8 convolution entry point: same generic im2col + sgemm
// int8 fallback, with stride 2 in both dimensions.
static void conv3x3s2_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt) {
    int kernel_w = 3;
    int kernel_h = 3;

    int stride_w = 2;
    int stride_h = 2;

    conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, opt);
}
GB_binop__max_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):        GB (_AaddB__max_int8)
// A.*B function (eWiseMult):      GB (_AemultB_08__max_int8)
// A.*B function (eWiseMult):      GB (_AemultB_02__max_int8)
// A.*B function (eWiseMult):      GB (_AemultB_04__max_int8)
// A.*B function (eWiseMult):      GB (_AemultB_bitmap__max_int8)
// A*D function (colscale):        GB (_AxD__max_int8)
// D*A function (rowscale):        GB (_DxB__max_int8)
// C+=B function (dense accum):    GB (_Cdense_accumB__max_int8)
// C+=b function (dense accum):    GB (_Cdense_accumb__max_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_int8)
// C=A+B function (dense ewise3):  GB (_Cdense_ewise3_noaccum__max_int8)
// C=scalar+B                      GB (_bind1st__max_int8)
// C=scalar+B'                     GB (_bind1st_tran__max_int8)
// C=A+scalar                      GB (_bind2nd__max_int8)
// C=A'+scalar                     GB (_bind2nd_tran__max_int8)

// C type:   int8_t
// A type:   int8_t
// A pattern? 0
// B type:   int8_t
// B pattern? 0

// BinaryOp: cij = GB_IMAX (aij, bij)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_IMAX (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MAX || GxB_NO_INT8 || GxB_NO_MAX_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__max_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // whole computation comes from the shared template, specialized by the
    // GB_* macros defined above
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__max_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__max_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__max_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable (the block above always returns); kept as generated
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__max_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__max_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__max_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read by the template when is_eWiseUnion is true
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__max_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__max_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for MAX (commutative), so only the #else branch
    // below is compiled for this operator
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__max_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__max_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__max_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (GBB is true if present)
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IMAX (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__max_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IMAX (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_IMAX (x, aij) ;                \
}

GrB_Info GB (_bind1st_tran__max_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows (same type here)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_IMAX (aij, y) ;                \
}

GrB_Info GB (_bind2nd_tran__max_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ==== cg_single.c ==== */
/*--------------------------------------------------------------------

  NAS Parallel Benchmarks 2.3 OpenMP C versions - CG

  This benchmark is an OpenMP C version of the NPB CG code.

  The OpenMP C versions are developed by RWCP and derived from the serial
  Fortran versions in "NPB 2.3-serial" developed by NAS.

  Permission to use, copy, distribute and modify this software for any
  purpose with or without fee is hereby granted.
  This software is provided "as is" without express or implied warranty.

  Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp

  Information on OpenMP activities at RWCP is available at:

           http://pdplab.trc.rwcp.or.jp/pdperf/Omni/

  Information on NAS Parallel Benchmarks 2.3 is available at:

           http://www.nas.nasa.gov/NAS/NPB/

--------------------------------------------------------------------*/
/*--------------------------------------------------------------------

  Authors: M. Yarrow
           C. Kuszmaul

  OpenMP C version: S. Satoh

--------------------------------------------------------------------*/

/*
c---------------------------------------------------------------------
c  Note: please observe that in the routine conj_grad three
c  implementations of the sparse matrix-vector multiply have
c  been supplied.  The default matrix-vector multiply is not
c  loop unrolled.  The alternate implementations are unrolled
c  to a depth of 2 and unrolled to a depth of 8.  Please
c  experiment with these to find the fastest for your particular
c  architecture.  If reporting timing results, any of these three may
c  be used without penalty.
c---------------------------------------------------------------------
*/
//#include "npb-C.h"

/* NAS Parallel Benchmarks 2.3 OpenMP C Versions */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>   /* gettimeofday(), struct timeval: required by wtime() below */
#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */

typedef int boolean;
typedef struct { double real; double imag; } dcomplex;

#define TRUE 1
#define FALSE 0

#define max(a,b) (((a) > (b)) ? (a) : (b))
#define min(a,b) (((a) < (b)) ?
(a) : (b)) #define pow2(a) ((a)*(a)) #define get_real(c) c.real #define get_imag(c) c.imag #define cadd(c,a,b) (c.real = a.real + b.real, c.imag = a.imag + b.imag) #define csub(c,a,b) (c.real = a.real - b.real, c.imag = a.imag - b.imag) #define cmul(c,a,b) (c.real = a.real * b.real - a.imag * b.imag, \ c.imag = a.real * b.imag + a.imag * b.real) #define crmul(c,a,b) (c.real = a.real * b, c.imag = a.imag * b) extern double randlc(double *, double); extern void vranlc(int, double *, double, double *); extern void timer_clear(int); extern void timer_start(int); extern void timer_stop(int); extern double timer_read(int); extern void c_print_results(char *name, char cclass, int n1, int n2, int n3, int niter, int nthreads, double t, double mops, char *optype, int passed_verification, char *npbversion, char *compiletime, char *cc, char *clink, char *c_lib, char *c_inc, char *cflags, char *clinkflags, char *rand); //#include "npbparams.h" /******************/ /* default values */ /******************/ #ifndef CLASS #define CLASS 'B' #endif #if CLASS == 'S' /* CLASS = S */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the classc of the NPB c in this directory. Do not modify it by hand. */ #define NA 1400 #define NONZER 7 #define NITER 15 #define SHIFT 10.0 #define RCOND 1.0e-1 #define CONVERTDOUBLE FALSE #endif #if CLASS == 'W' /* CLASS = W */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the classc of the NPB c in this directory. Do not modify it by hand. */ #define NA 7000 #define NONZER 8 #define NITER 15 #define SHIFT 12.0 #define RCOND 1.0e-1 #define CONVERTDOUBLE FALSE #endif #if CLASS == 'A' /* CLASS = A */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the classc of the NPB c in this directory. Do not modify it by hand. 
*/ #define NA 14000 #define NONZER 11 #define NITER 15 #define SHIFT 20.0 #define RCOND 1.0e-1 #define CONVERTDOUBLE FALSE #endif #if CLASS == 'B' /* CLASS = B */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the classc of the NPB c in this directory. Do not modify it by hand. */ #define NA 75000 #define NONZER 13 #define NITER 75 #define SHIFT 60.0 #define RCOND 1.0e-1 #define CONVERTDOUBLE FALSE #endif #if CLASS == 'C' /* CLASS = C */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the classc of the NPB c in this directory. Do not modify it by hand. */ #define NA 150000 #define NONZER 15 #define NITER 75 #define SHIFT 110.0 #define RCOND 1.0e-1 #define CONVERTDOUBLE FALSE #endif #define COMPILETIME "28 Oct 2014" #define NPBVERSION "2.3" #define CS1 "gcc" #define CS2 "$(CC)" #define CS3 "(none)" #define CS4 "-I../common" #define CS5 "-fopenmp -O2" #define CS6 "-lm -fopenmp" #define CS7 "randdp" #define NZ NA*(NONZER+1)*(NONZER+1)+NA*(NONZER+2) /* global variables */ /* common /partit_size/ */ static int naa; static int nzz; static int firstrow; static int lastrow; static int firstcol; static int lastcol; /* common /main_int_mem/ */ static int colidx[NZ+1]; /* colidx[1:NZ] */ static int rowstr[NA+1+1]; /* rowstr[1:NA+1] */ static int iv[2*NA+1+1]; /* iv[1:2*NA+1] */ static int arow[NZ+1]; /* arow[1:NZ] */ static int acol[NZ+1]; /* acol[1:NZ] */ /* common /main_flt_mem/ */ static double v[NA+1+1]; /* v[1:NA+1] */ static double aelt[NZ+1]; /* aelt[1:NZ] */ static double a[NZ+1]; /* a[1:NZ] */ static double x[NA+2+1]; /* x[1:NA+2] */ static double z[NA+2+1]; /* z[1:NA+2] */ static double p[NA+2+1]; /* p[1:NA+2] */ static double q[NA+2+1]; /* q[1:NA+2] */ static double r[NA+2+1]; /* r[1:NA+2] */ static double w[NA+2+1]; /* w[1:NA+2] */ /* common /urando/ */ static double amult; static double tran; /* function declarations */ static void conj_grad (int 
colidx[], int rowstr[], double x[], double z[], double a[], double p[], double q[], double r[], double w[], double *rnorm); static void makea(int n, int nz, double a[], int colidx[], int rowstr[], int nonzer, int firstrow, int lastrow, int firstcol, int lastcol, double rcond, int arow[], int acol[], double aelt[], double v[], int iv[], double shift ); static void sparse(double a[], int colidx[], int rowstr[], int n, int arow[], int acol[], double aelt[], int firstrow, int lastrow, double x[], boolean mark[], int nzloc[], int nnza); static void sprnvc(int n, int nz, double v[], int iv[], int nzloc[], int mark[]); static int icnvrt(double x, int ipwr2); static void vecset(int n, double v[], int iv[], int *nzv, int i, double val); /*-------------------------------------------------------------------- program cg --------------------------------------------------------------------*/ int main(int argc, char **argv) { int i, j, k, it; int nthreads = 1; double zeta; double rnorm; double norm_temp11; double norm_temp12; double t, mflops; char cclass; boolean verified; double zeta_verify_value, epsilon; firstrow = 1; lastrow = NA; firstcol = 1; lastcol = NA; if (NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10.0) { cclass = 'S'; zeta_verify_value = 8.5971775078648; } else if (NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12.0) { cclass = 'W'; zeta_verify_value = 10.362595087124; } else if (NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20.0) { cclass = 'A'; zeta_verify_value = 17.130235054029; } else if (NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60.0) { cclass = 'B'; zeta_verify_value = 22.712745482631; } else if (NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110.0) { cclass = 'C'; zeta_verify_value = 28.973605592845; } else { cclass = 'U'; } printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version" " - CG Benchmark\n"); printf(" Size: %10d\n", NA); printf(" Iterations: %5d\n", NITER); naa = NA; nzz = NZ; 
/*-------------------------------------------------------------------- c Initialize random number generator c-------------------------------------------------------------------*/ tran = 314159265.0; amult = 1220703125.0; zeta = randlc( &tran, amult ); /*-------------------------------------------------------------------- c c-------------------------------------------------------------------*/ makea(naa, nzz, a, colidx, rowstr, NONZER, firstrow, lastrow, firstcol, lastcol, RCOND, arow, acol, aelt, v, iv, SHIFT); /*--------------------------------------------------------------------- c Note: as a result of the above call to makea: c values of j used in indexing rowstr go from 1 --> lastrow-firstrow+1 c values of colidx which are col indexes go from firstcol --> lastcol c So: c Shift the col index vals from actual (firstcol --> lastcol ) c to local, i.e., (1 --> lastcol-firstcol+1) c---------------------------------------------------------------------*/ #pragma omp parallel private(it,i,j,k) { #pragma omp for nowait for (j = 1; j <= lastrow - firstrow + 1; j++) { for (k = rowstr[j]; k < rowstr[j+1]; k++) { colidx[k] = colidx[k] - firstcol + 1; } } /*-------------------------------------------------------------------- c set starting vector to (1, 1, .... 
1) c-------------------------------------------------------------------*/ #pragma omp for nowait for (i = 1; i <= NA+1; i++) { x[i] = 1.0; } #pragma omp single zeta = 0.0; /*------------------------------------------------------------------- c----> c Do one iteration untimed to init all code and data page tables c----> (then reinit, start timing, to niter its) c-------------------------------------------------------------------*/ for (it = 1; it <= 1; it++) { /*-------------------------------------------------------------------- c The call to the conjugate gradient routine: c-------------------------------------------------------------------*/ conj_grad (colidx, rowstr, x, z, a, p, q, r, w, &rnorm); /*-------------------------------------------------------------------- c zeta = shift + 1/(x.z) c So, first: (x.z) c Also, find norm of z c So, first: (z.z) c-------------------------------------------------------------------*/ #pragma omp single { norm_temp11 = 0.0; norm_temp12 = 0.0; } /* end single */ #pragma omp for reduction(+:norm_temp11,norm_temp12) for (j = 1; j <= lastcol-firstcol+1; j++) { norm_temp11 = norm_temp11 + x[j]*z[j]; norm_temp12 = norm_temp12 + z[j]*z[j]; } #pragma omp single norm_temp12 = 1.0 / sqrt( norm_temp12 ); /*-------------------------------------------------------------------- c Normalize z to obtain x c-------------------------------------------------------------------*/ #pragma omp for for (j = 1; j <= lastcol-firstcol+1; j++) { x[j] = norm_temp12*z[j]; } } /* end of do one iteration untimed */ /*-------------------------------------------------------------------- c set starting vector to (1, 1, .... 
1) c-------------------------------------------------------------------*/ #pragma omp for nowait for (i = 1; i <= NA+1; i++) { x[i] = 1.0; } #pragma omp single zeta = 0.0; } /* end parallel */ timer_clear( 1 ); timer_start( 1 ); /*-------------------------------------------------------------------- c----> c Main Iteration for inverse power method c----> c-------------------------------------------------------------------*/ #pragma omp parallel private(it,i,j,k) { for (it = 1; it <= NITER; it++) { /*-------------------------------------------------------------------- c The call to the conjugate gradient routine: c-------------------------------------------------------------------*/ conj_grad(colidx, rowstr, x, z, a, p, q, r, w, &rnorm); /*-------------------------------------------------------------------- c zeta = shift + 1/(x.z) c So, first: (x.z) c Also, find norm of z c So, first: (z.z) c-------------------------------------------------------------------*/ #pragma omp single { norm_temp11 = 0.0; norm_temp12 = 0.0; } /* end single */ #pragma omp for reduction(+:norm_temp11,norm_temp12) for (j = 1; j <= lastcol-firstcol+1; j++) { norm_temp11 = norm_temp11 + x[j]*z[j]; norm_temp12 = norm_temp12 + z[j]*z[j]; } #pragma omp single { norm_temp12 = 1.0 / sqrt( norm_temp12 ); zeta = SHIFT + 1.0 / norm_temp11; } /* end single */ #pragma omp master { if( it == 1 ) { printf(" iteration ||r|| zeta\n"); } printf(" %5d %20.14e%20.13e\n", it, rnorm, zeta); } /* end master */ /*-------------------------------------------------------------------- c Normalize z to obtain x c-------------------------------------------------------------------*/ #pragma omp for for (j = 1; j <= lastcol-firstcol+1; j++) { x[j] = norm_temp12*z[j]; } } /* end of main iter inv pow meth */ #if defined(_OPENMP) #pragma omp master nthreads = omp_get_num_threads(); #endif /* _OPENMP */ } /* end parallel */ timer_stop( 1 ); /*-------------------------------------------------------------------- c End of timed 
section c-------------------------------------------------------------------*/ t = timer_read( 1 ); printf(" Benchmark completed\n"); epsilon = 1.0e-10; if (cclass != 'U') { if (fabs(zeta - zeta_verify_value) <= epsilon) { verified = TRUE; printf(" VERIFICATION SUCCESSFUL\n"); printf(" Zeta is %20.12e\n", zeta); printf(" Error is %20.12e\n", zeta - zeta_verify_value); } else { verified = FALSE; printf(" VERIFICATION FAILED\n"); printf(" Zeta %20.12e\n", zeta); printf(" The correct zeta is %20.12e\n", zeta_verify_value); } } else { verified = FALSE; printf(" Problem size unknown\n"); printf(" NO VERIFICATION PERFORMED\n"); } if ( t != 0.0 ) { mflops = (2.0*NITER*NA) * (3.0+(NONZER*(NONZER+1)) + 25.0*(5.0+(NONZER*(NONZER+1))) + 3.0 ) / t / 1000000.0; } else { mflops = 0.0; } c_print_results("CG", cclass, NA, 0, 0, NITER, nthreads, t, mflops, " floating point", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void conj_grad ( int colidx[], /* colidx[1:nzz] */ int rowstr[], /* rowstr[1:naa+1] */ double x[], /* x[*] */ double z[], /* z[*] */ double a[], /* a[1:nzz] */ double p[], /* p[*] */ double q[], /* q[*] */ double r[], /* r[*] */ double w[], /* w[*] */ double *rnorm ) /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*--------------------------------------------------------------------- c Floaging point arrays here are named as in NPB1 spec discussion of c CG algorithm c---------------------------------------------------------------------*/ { static double d, sum, rho, rho0, alpha, beta; int i, j, k; int cgit, cgitmax = 25; #pragma omp single nowait rho = 0.0; /*-------------------------------------------------------------------- c Initialize the CG algorithm: 
c-------------------------------------------------------------------*/ #pragma omp for nowait for (j = 1; j <= naa+1; j++) { q[j] = 0.0; z[j] = 0.0; r[j] = x[j]; p[j] = r[j]; w[j] = 0.0; } /*-------------------------------------------------------------------- c rho = r.r c Now, obtain the norm of r: First, sum squares of r elements locally... c-------------------------------------------------------------------*/ #pragma omp for reduction(+:rho) for (j = 1; j <= lastcol-firstcol+1; j++) { rho = rho + x[j]*x[j]; } /*-------------------------------------------------------------------- c----> c The conj grad iteration loop c----> c-------------------------------------------------------------------*/ for (cgit = 1; cgit <= cgitmax; cgit++) { #pragma omp single nowait { rho0 = rho; d = 0.0; rho = 0.0; } /* end single */ /*-------------------------------------------------------------------- c q = A.p c The partition submatrix-vector multiply: use workspace w c--------------------------------------------------------------------- C C NOTE: this version of the multiply is actually (slightly: maybe %5) C faster on the sp2 on 16 nodes than is the unrolled-by-2 version C below. On the Cray t3d, the reverse is true, i.e., the C unrolled-by-two version is some 10% faster. C The unrolled-by-8 version below is significantly faster C on the Cray t3d - overall speed of code is 1.5 times faster. 
*/ /* rolled version */ #pragma omp for private(sum,k) for (j = 1; j <= lastrow-firstrow+1; j++) { sum = 0.0; for (k = rowstr[j]; k < rowstr[j+1]; k++) { sum = sum + a[k]*p[colidx[k]]; } w[j] = sum; } /* unrolled-by-two version #pragma omp for private(i,k) for (j = 1; j <= lastrow-firstrow+1; j++) { int iresidue; double sum1, sum2; i = rowstr[j]; iresidue = (rowstr[j+1]-i) % 2; sum1 = 0.0; sum2 = 0.0; if (iresidue == 1) sum1 = sum1 + a[i]*p[colidx[i]]; for (k = i+iresidue; k <= rowstr[j+1]-2; k += 2) { sum1 = sum1 + a[k] * p[colidx[k]]; sum2 = sum2 + a[k+1] * p[colidx[k+1]]; } w[j] = sum1 + sum2; } */ /* unrolled-by-8 version #pragma omp for private(i,k,sum) for (j = 1; j <= lastrow-firstrow+1; j++) { int iresidue; i = rowstr[j]; iresidue = (rowstr[j+1]-i) % 8; sum = 0.0; for (k = i; k <= i+iresidue-1; k++) { sum = sum + a[k] * p[colidx[k]]; } for (k = i+iresidue; k <= rowstr[j+1]-8; k += 8) { sum = sum + a[k ] * p[colidx[k ]] + a[k+1] * p[colidx[k+1]] + a[k+2] * p[colidx[k+2]] + a[k+3] * p[colidx[k+3]] + a[k+4] * p[colidx[k+4]] + a[k+5] * p[colidx[k+5]] + a[k+6] * p[colidx[k+6]] + a[k+7] * p[colidx[k+7]]; } w[j] = sum; } */ #pragma omp for for (j = 1; j <= lastcol-firstcol+1; j++) { q[j] = w[j]; } /*-------------------------------------------------------------------- c Clear w for reuse... 
c-------------------------------------------------------------------*/ #pragma omp for nowait for (j = 1; j <= lastcol-firstcol+1; j++) { w[j] = 0.0; } /*-------------------------------------------------------------------- c Obtain p.q c-------------------------------------------------------------------*/ #pragma omp for reduction(+:d) for (j = 1; j <= lastcol-firstcol+1; j++) { d = d + p[j]*q[j]; } /*-------------------------------------------------------------------- c Obtain alpha = rho / (p.q) c-------------------------------------------------------------------*/ #pragma omp single alpha = rho0 / d; /*-------------------------------------------------------------------- c Save a temporary of rho c-------------------------------------------------------------------*/ /* rho0 = rho;*/ /*--------------------------------------------------------------------- c Obtain z = z + alpha*p c and r = r - alpha*q c---------------------------------------------------------------------*/ #pragma omp for for (j = 1; j <= lastcol-firstcol+1; j++) { z[j] = z[j] + alpha*p[j]; r[j] = r[j] - alpha*q[j]; } /*--------------------------------------------------------------------- c rho = r.r c Now, obtain the norm of r: First, sum squares of r elements locally... 
c---------------------------------------------------------------------*/ #pragma omp for reduction(+:rho) for (j = 1; j <= lastcol-firstcol+1; j++) { rho = rho + r[j]*r[j]; } /*-------------------------------------------------------------------- c Obtain beta: c-------------------------------------------------------------------*/ #pragma omp single beta = rho / rho0; /*-------------------------------------------------------------------- c p = r + beta*p c-------------------------------------------------------------------*/ #pragma omp for for (j = 1; j <= lastcol-firstcol+1; j++) { p[j] = r[j] + beta*p[j]; } } /* end of do cgit=1,cgitmax */ /*--------------------------------------------------------------------- c Compute residual norm explicitly: ||r|| = ||x - A.z|| c First, form A.z c The partition submatrix-vector multiply c---------------------------------------------------------------------*/ #pragma omp single nowait sum = 0.0; #pragma omp for private(d, k) for (j = 1; j <= lastrow-firstrow+1; j++) { d = 0.0; for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) { d = d + a[k]*z[colidx[k]]; } w[j] = d; } #pragma omp for for (j = 1; j <= lastcol-firstcol+1; j++) { r[j] = w[j]; } /*-------------------------------------------------------------------- c At this point, r contains A.z c-------------------------------------------------------------------*/ #pragma omp for reduction(+:sum) private(d) for (j = 1; j <= lastcol-firstcol+1; j++) { d = x[j] - r[j]; sum = sum + d*d; } #pragma omp single { (*rnorm) = sqrt(sum); } /* end single */ } /*--------------------------------------------------------------------- c generate the test problem for benchmark 6 c makea generates a sparse matrix with a c prescribed sparsity distribution c c parameter type usage c c input c c n i number of cols/rows of matrix c nz i nonzeros as declared array size c rcond r*8 condition number c shift r*8 main diagonal shift c c output c c a r*8 array for nonzeros c colidx i col indices c rowstr i row 
pointers c c workspace c c iv, arow, acol i c v, aelt r*8 c---------------------------------------------------------------------*/ static void makea( int n, int nz, double a[], /* a[1:nz] */ int colidx[], /* colidx[1:nz] */ int rowstr[], /* rowstr[1:n+1] */ int nonzer, int firstrow, int lastrow, int firstcol, int lastcol, double rcond, int arow[], /* arow[1:nz] */ int acol[], /* acol[1:nz] */ double aelt[], /* aelt[1:nz] */ double v[], /* v[1:n+1] */ int iv[], /* iv[1:2*n+1] */ double shift ) { int i, nnza, iouter, ivelt, ivelt1, irow, nzv; /*-------------------------------------------------------------------- c nonzer is approximately (int(sqrt(nnza /n))); c-------------------------------------------------------------------*/ double size, ratio, scale; int jcol; size = 1.0; ratio = pow(rcond, (1.0 / (double)n)); nnza = 0; /*--------------------------------------------------------------------- c Initialize colidx(n+1 .. 2n) to zero. c Used by sprnvc to mark nonzero positions c---------------------------------------------------------------------*/ #pragma omp parallel for for (i = 1; i <= n; i++) { colidx[n+i] = 0; } for (iouter = 1; iouter <= n; iouter++) { nzv = nonzer; sprnvc(n, nzv, v, iv, &(colidx[0]), &(colidx[n])); vecset(n, v, iv, &nzv, iouter, 0.5); for (ivelt = 1; ivelt <= nzv; ivelt++) { jcol = iv[ivelt]; if (jcol >= firstcol && jcol <= lastcol) { scale = size * v[ivelt]; for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) { irow = iv[ivelt1]; if (irow >= firstrow && irow <= lastrow) { nnza = nnza + 1; if (nnza > nz) { printf("Space for matrix elements exceeded in" " makea\n"); printf("nnza, nzmax = %d, %d\n", nnza, nz); printf("iouter = %d\n", iouter); exit(1); } acol[nnza] = jcol; arow[nnza] = irow; aelt[nnza] = v[ivelt1] * scale; } } } } size = size * ratio; } /*--------------------------------------------------------------------- c ... 
add the identity * rcond to the generated matrix to bound c the smallest eigenvalue from below by rcond c---------------------------------------------------------------------*/ for (i = firstrow; i <= lastrow; i++) { if (i >= firstcol && i <= lastcol) { iouter = n + i; nnza = nnza + 1; if (nnza > nz) { printf("Space for matrix elements exceeded in makea\n"); printf("nnza, nzmax = %d, %d\n", nnza, nz); printf("iouter = %d\n", iouter); exit(1); } acol[nnza] = i; arow[nnza] = i; aelt[nnza] = rcond - shift; } } /*--------------------------------------------------------------------- c ... make the sparse matrix from list of elements with duplicates c (v and iv are used as workspace) c---------------------------------------------------------------------*/ sparse(a, colidx, rowstr, n, arow, acol, aelt, firstrow, lastrow, v, &(iv[0]), &(iv[n]), nnza); } /*--------------------------------------------------- c generate a sparse matrix from a list of c [col, row, element] tri c---------------------------------------------------*/ static void sparse( double a[], /* a[1:*] */ int colidx[], /* colidx[1:*] */ int rowstr[], /* rowstr[1:*] */ int n, int arow[], /* arow[1:*] */ int acol[], /* acol[1:*] */ double aelt[], /* aelt[1:*] */ int firstrow, int lastrow, double x[], /* x[1:n] */ boolean mark[], /* mark[1:n] */ int nzloc[], /* nzloc[1:n] */ int nnza) /*--------------------------------------------------------------------- c rows range from firstrow to lastrow c the rowstr pointers are defined for nrows = lastrow-firstrow+1 values c---------------------------------------------------------------------*/ { int nrows; int i, j, jajp1, nza, k, nzrow; double xi; /*-------------------------------------------------------------------- c how many rows of result c-------------------------------------------------------------------*/ nrows = lastrow - firstrow + 1; /*-------------------------------------------------------------------- c ...count the number of triples in each row 
c-------------------------------------------------------------------*/ #pragma omp parallel for for (j = 1; j <= n; j++) { rowstr[j] = 0; mark[j] = FALSE; } rowstr[n+1] = 0; for (nza = 1; nza <= nnza; nza++) { j = (arow[nza] - firstrow + 1) + 1; rowstr[j] = rowstr[j] + 1; } rowstr[1] = 1; for (j = 2; j <= nrows+1; j++) { rowstr[j] = rowstr[j] + rowstr[j-1]; } /*--------------------------------------------------------------------- c ... rowstr(j) now is the location of the first nonzero c of row j of a c---------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c ... do a bucket sort of the triples on the row index c-------------------------------------------------------------------*/ for (nza = 1; nza <= nnza; nza++) { j = arow[nza] - firstrow + 1; k = rowstr[j]; a[k] = aelt[nza]; colidx[k] = acol[nza]; rowstr[j] = rowstr[j] + 1; } /*-------------------------------------------------------------------- c ... rowstr(j) now points to the first element of row j+1 c-------------------------------------------------------------------*/ for (j = nrows; j >= 1; j--) { rowstr[j+1] = rowstr[j]; } rowstr[1] = 1; /*-------------------------------------------------------------------- c ... generate the actual output rows by adding elements c-------------------------------------------------------------------*/ nza = 0; #pragma omp parallel for for (i = 1; i <= n; i++) { x[i] = 0.0; mark[i] = FALSE; } jajp1 = rowstr[1]; for (j = 1; j <= nrows; j++) { nzrow = 0; /*-------------------------------------------------------------------- c ...loop over the jth row of a c-------------------------------------------------------------------*/ for (k = jajp1; k < rowstr[j+1]; k++) { i = colidx[k]; x[i] = x[i] + a[k]; if ( mark[i] == FALSE && x[i] != 0.0) { mark[i] = TRUE; nzrow = nzrow + 1; nzloc[nzrow] = i; } } /*-------------------------------------------------------------------- c ... 
extract the nonzeros of this row c-------------------------------------------------------------------*/ for (k = 1; k <= nzrow; k++) { i = nzloc[k]; mark[i] = FALSE; xi = x[i]; x[i] = 0.0; if (xi != 0.0) { nza = nza + 1; a[nza] = xi; colidx[nza] = i; } } jajp1 = rowstr[j+1]; rowstr[j+1] = nza + rowstr[1]; } } /*--------------------------------------------------------------------- c generate a sparse n-vector (v, iv) c having nzv nonzeros c c mark(i) is set to 1 if position i is nonzero. c mark is all zero on entry and is reset to all zero before exit c this corrects a performance bug found by John G. Lewis, caused by c reinitialization of mark on every one of the n calls to sprnvc ---------------------------------------------------------------------*/ static void sprnvc( int n, int nz, double v[], /* v[1:*] */ int iv[], /* iv[1:*] */ int nzloc[], /* nzloc[1:n] */ int mark[] ) /* mark[1:n] */ { int nn1; int nzrow, nzv, ii, i; double vecelt, vecloc; nzv = 0; nzrow = 0; nn1 = 1; do { nn1 = 2 * nn1; } while (nn1 < n); /*-------------------------------------------------------------------- c nn1 is the smallest power of two not less than n c-------------------------------------------------------------------*/ while (nzv < nz) { vecelt = randlc(&tran, amult); /*-------------------------------------------------------------------- c generate an integer between 1 and n in a portable manner c-------------------------------------------------------------------*/ vecloc = randlc(&tran, amult); i = icnvrt(vecloc, nn1) + 1; if (i > n) continue; /*-------------------------------------------------------------------- c was this integer generated already? 
c-------------------------------------------------------------------*/ if (mark[i] == 0) { mark[i] = 1; nzrow = nzrow + 1; nzloc[nzrow] = i; nzv = nzv + 1; v[nzv] = vecelt; iv[nzv] = i; } } for (ii = 1; ii <= nzrow; ii++) { i = nzloc[ii]; mark[i] = 0; } } /*--------------------------------------------------------------------- * scale a double precision number x in (0,1) by a power of 2 and chop it *---------------------------------------------------------------------*/ static int icnvrt(double x, int ipwr2) { return ((int)(ipwr2 * x)); } /*-------------------------------------------------------------------- c set ith element of sparse vector (v, iv) with c nzv nonzeros to val c-------------------------------------------------------------------*/ static void vecset( int n, double v[], /* v[1:*] */ int iv[], /* iv[1:*] */ int *nzv, int i, double val) { int k; boolean set; set = FALSE; for (k = 1; k <= *nzv; k++) { if (iv[k] == i) { v[k] = val; set = TRUE; } } if (set == FALSE) { *nzv = *nzv + 1; v[*nzv] = val; iv[*nzv] = i; } } /* cat ./common/c_print_results.c */ /*****************************************************************/ /****** C _ P R I N T _ R E S U L T S ******/ /*****************************************************************/ void c_print_results( char *name, char cclass, int n1, int n2, int n3, int niter, int nthreads, double t, double mops, char *optype, int passed_verification, char *npbversion, char *compiletime, char *cc, char *clink, char *c_lib, char *c_inc, char *cflags, char *clinkflags, char *rand) { char *evalue="1000"; printf( "\n\n %s Benchmark Completed\n", name ); printf( " Class = %c\n", cclass ); if( n2 == 0 && n3 == 0 ) printf( " Size = %12d\n", n1 ); /* as in IS */ else printf( " Size = %3dx%3dx%3d\n", n1,n2,n3 ); printf( " Iterations = %12d\n", niter ); printf( " Threads = %12d\n", nthreads ); printf( " Time in seconds = %12.2f\n", t ); printf( " Mop/s total = %12.2f\n", mops ); printf( " Operation type = %24s\n", optype); if( 
passed_verification ) printf( " Verification = SUCCESSFUL\n" ); else printf( " Verification = UNSUCCESSFUL\n" ); printf( " Version = %12s\n", npbversion ); printf( " Compile date = %12s\n", compiletime ); printf( "\n Compile options:\n" ); printf( " CC = %s\n", cc ); printf( " CLINK = %s\n", clink ); printf( " C_LIB = %s\n", c_lib ); printf( " C_INC = %s\n", c_inc ); printf( " CFLAGS = %s\n", cflags ); printf( " CLINKFLAGS = %s\n", clinkflags ); printf( " RAND = %s\n", rand ); #ifdef SMP evalue = getenv("MP_SET_NUMTHREADS"); printf( " MULTICPUS = %s\n", evalue ); #endif /* printf( "\n\n" ); printf( " Please send the results of this run to:\n\n" ); printf( " NPB Development Team\n" ); printf( " Internet: npb@nas.nasa.gov\n \n" ); printf( " If email is not available, send this to:\n\n" ); printf( " MS T27A-1\n" ); printf( " NASA Ames Research Center\n" ); printf( " Moffett Field, CA 94035-1000\n\n" ); printf( " Fax: 415-604-3957\n\n" );*/ } /* cat ./common/c_timers.c */ /* #include "wtime.h" #if defined(IBM) #define wtime wtime #elif defined(CRAY) #define wtime WTIME #else #define wtime wtime_ #endif */ /* Prototype */ void wtime( double * ); /*****************************************************************/ /****** E L A P S E D _ T I M E ******/ /*****************************************************************/ double elapsed_time( void ) { double t; wtime( &t ); return( t ); } double start[64], elapsed[64]; /*****************************************************************/ /****** T I M E R _ C L E A R ******/ /*****************************************************************/ void timer_clear( int n ) { elapsed[n] = 0.0; } /*****************************************************************/ /****** T I M E R _ S T A R T ******/ /*****************************************************************/ void timer_start( int n ) { start[n] = elapsed_time(); } /*****************************************************************/ /****** T I M E R _ S T O P ******/ 
/*****************************************************************/
void timer_stop( int n )
{
    /* accumulate the interval since the matching timer_start(n) */
    double t, now;

    now = elapsed_time();
    t = now - start[n];
    elapsed[n] += t;
}

/*****************************************************************/
/******            T  I  M  E  R  _  R  E  A  D             ******/
/*****************************************************************/
double timer_read( int n )
{
    return( elapsed[n] );
}

void wtime(double *t)
{
    /* the first call anchors an epoch so returned values stay small */
    static int sec = -1;
    struct timeval tv;

    gettimeofday(&tv, (void *)0);
    /* gettimeofday(&tv, (struct timezone *)0); */
    if (sec < 0) sec = tv.tv_sec;
    *t = (tv.tv_sec - sec) + 1.0e-6*tv.tv_usec;
}

// common/c_randdp.c
/* */
#if defined(USE_POW)
#define r23 pow(0.5, 23.0)
#define r46 (r23*r23)
#define t23 pow(2.0, 23.0)
#define t46 (t23*t23)
#else
#define r23 (0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5)
#define r46 (r23*r23)
#define t23 (2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0)
#define t46 (t23*t23)
#endif

/*c---------------------------------------------------------------------
c---------------------------------------------------------------------*/
double randlc (double *x, double a)
{
/*c---------------------------------------------------------------------
c---------------------------------------------------------------------*/

/*c---------------------------------------------------------------------
c
c This routine returns a uniform pseudorandom double precision number in the
c range (0, 1) by using the linear congruential generator
c
c x_{k+1} = a x_k (mod 2^46)
c
c where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
c before repeating. The argument A is the same as 'a' in the above formula,
c and X is the same as x_0. A and X must be odd double precision integers
c in the range (1, 2^46). The returned value RANDLC is normalized to be
c between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain
c the new seed x_1, so that subsequent calls to RANDLC using the same
c arguments will generate a continuous sequence.
c
c This routine should produce the same results on any computer with at least
c 48 mantissa bits in double precision floating point data. On 64 bit
c systems, double precision should be disabled.
c
c David H. Bailey October 26, 1990
c
c---------------------------------------------------------------------*/

    double t1,t2,t3,t4,a1,a2,x1,x2,z;

    /* Split A as A = 2^23 * A1 + A2 so the 46-bit product
       below can be formed exactly in 53-bit doubles. */
    t1 = r23 * a;
    a1 = (int)t1;
    a2 = a - t23 * a1;

    /* Split X the same way, then compute
       Z = A1*X2 + A2*X1 (mod 2^23) and X = 2^23*Z + A2*X2 (mod 2^46). */
    t1 = r23 * (*x);
    x1 = (int)t1;
    x2 = (*x) - t23 * x1;

    t1 = a1 * x2 + a2 * x1;
    t2 = (int)(r23 * t1);
    z = t1 - t23 * t2;
    t3 = t23 * z + a2 * x2;
    t4 = (int)(r46 * t3);
    (*x) = t3 - t46 * t4;

    /* normalize the new seed into (0, 1) */
    return (r46 * (*x));
}
/* ===== begin concatenated file: mash.h ===== */
// Gao Wang (c) 2017-2020 wang.gao@columbia.edu #ifndef _MASH_H #define _MASH_H #include <cmath> #include <armadillo> #include <iostream> #ifdef _OPENMP # include <omp.h> #endif using std::log; using std::exp; using std::sqrt; using arma::uword; using arma::vec; using arma::uvec; using arma::rowvec; using arma::colvec; using arma::mat; using arma::cube; using arma::datum; using arma::zeros; using arma::eye; using arma::size; using arma::accu; using arma::sum; using arma::max; using arma::abs; using arma::sqrt; using arma::pow; using arma::exp; using arma::log; using arma::trace; using arma::trans; using arma::find; using arma::inv; using arma::trimatu; using arma::chol; using arma::dot; using arma::intersect; using arma::find; // CONSTANTS // --------- const double LOG_2PI = log(2.0 * M_PI); const double INV_SQRT_2PI = 1.0 / sqrt(2.0 * M_PI); const double LOG_INV_SQRT_2PI = log(INV_SQRT_2PI); // INLINE FUNCTION DEFINITONS // -------------------------- inline vec dnorm(const vec & x, const vec & mu, const vec & sigma2, bool logd = false) { vec res = LOG_INV_SQRT_2PI - log(sqrt(sigma2)) - pow(x - mu, 2.0) / (2.0 * sigma2); if (logd) return res; else return exp(res); } inline vec dmvnorm_mat(const mat & x, const vec & mean, const mat & sigma, bool logd = false, bool inversed = false) { double xdim = static_cast<double>(x.n_rows); vec out(x.n_cols); mat rooti; // we have previously computed rooti // in R eg rooti <- backsolve(chol(sigma), diag(ncol(x))) if (inversed) { rooti = sigma; } else { try { rooti = trans(inv(trimatu(chol(sigma)))); } catch (const std::runtime_error & error) { if (logd) out.fill(-datum::inf); else out.fill(0.0); for (uword i = 0; i < x.n_cols; ++i) if (accu(abs(x.col(i) - mean)) < 1e-6) out.at(i) = datum::inf; return out; } } double rootisum = sum(log(rooti.diag())); double constants = -(xdim / 2.0) * LOG_2PI; for (unsigned i = 0; i < x.n_cols; i++) { vec z = rooti * (x.col(i) - mean); out.at(i) = constants - 0.5 * sum(z % z) + rootisum; } if 
(logd == false) { out = exp(out); } return out; } inline double dmvnorm(const vec & x, const vec & mean, const mat & sigma, bool logd = false, bool inversed = false) { mat rooti; if (inversed) { rooti = sigma; } else { try { rooti = trans(inv(trimatu(chol(sigma)))); } catch (const std::runtime_error & error) { double diff = accu(abs(x - mean)); if (logd) return (diff < 1e-6) ? datum::inf : -datum::inf; else return (diff < 1e-6) ? datum::inf : 0.0; } } double rootisum = sum(log(rooti.diag())); double constants = -(static_cast<double>(x.n_elem) / 2.0) * LOG_2PI; vec z = rooti * (x - mean); double out = constants - 0.5 * sum(z % z) + rootisum; if (logd == false) { out = exp(out); } return out; } template <class T, class U> inline T pnorm(const U & x, const T & m, const T & s, bool logd = false, bool lower_tail = true) { // see `normalCDF` function at: // http://en.cppreference.com/w/cpp/numeric/math/erfc T res = 0.5 * arma::erfc((x - m) / s * M_SQRT1_2); // FIXME: unlike R::pnorm(0,0,0) = 1 and R::pnorm(-1,0,0) = 0, here it generates NaN // I manually fix it below. 
// "s == 0" check is not good enough to ensure that res doesn't have NaN due to division by zero uvec nan = arma::find_nonfinite(0 / s); if (nan.n_elem > 0) { res.elem(intersect(find(x >= m), nan)).ones(); res.elem(intersect(find(x < m), nan)).zeros(); } if (!lower_tail & !logd) { return 1.0 - res; } else if (lower_tail & !logd) { return res; } else if (!lower_tail & logd) { return log(1.0 - res); } else { // (lower_tail & logd) return log(res); } } // a quicker way to compute diag(s) %*% V %*% diag(s) inline mat get_cov(const vec & s, const mat & V, const mat & L) { if (L.is_empty()) { /* return arma::diagmat(s) * V * arma::diagmat(s); */ return (V.each_col() % s).each_row() % s.t(); } else { mat svs = (V.each_col() % s).each_row() % s.t(); return L * svs * L.t(); } } inline mat get_cov(const vec & s, const mat & V) { /* return arma::diagmat(s) * V * arma::diagmat(s); */ return (V.each_col() % s).each_row() % s.t(); } // @title posterior_cov // @param Vinv R x R inverse covariance matrix for the likelihood // @param U R x R prior covariance matrix // @return R x R posterior covariance matrix // @description If bhat is N(b,V) and b is N(0,U) then b|bhat N(mu1,U1). This function returns U1. inline mat get_posterior_cov(const mat & Vinv, const mat & U) { // U %*% solve(Vinv %*% U + diag(nrow(U))) mat S = Vinv * U; S.diag() += 1.0; return U * S.i(); } // @title posterior_mean // @param bhat R vector of observations // @param Vinv R x R inverse covariance matrix for the likelihood // @param U1 R x R posterior covariance matrix, computed using posterior_cov // @return R vector of posterior mean // @description If bhat is N(b,V) and b is N(0,U) then b|bhat N(mu1,U1). This function returns mu1. 
inline vec get_posterior_mean(const vec & bhat, const mat & Vinv,
                              const mat & U1)
{
    return U1 * Vinv * bhat;
}

// matrix version of get_posterior_mean: one column of bhat per effect
inline mat get_posterior_mean_mat(const mat & bhat, const mat & Vinv,
                                  const mat & U1)
{
    return U1 * Vinv * bhat;
}

// SE CLASS
// --------
// Holds the standard errors of effect estimates: the working values (s),
// the alpha-scaled values (s_alpha), and optionally the original values
// (s_orig) before any transformation.
class SE
{
public:
    SE(){ }

    ~SE(){ }

    // set from given sbhat; when sbhat_alpha is empty, default s_alpha to all-ones
    void set(const mat & sbhat, const mat & sbhat_alpha)
    {
        s = sbhat;
        if (sbhat_alpha.is_empty()) s_alpha.ones(sbhat.n_rows, sbhat.n_cols);
        else s_alpha = sbhat_alpha;
    }

    // set both s and s_alpha to all-ones J x R matrices
    void set(int J, int R)
    {
        s.ones(J, R);
        s_alpha.ones(J, R);
    }

    // remember the untransformed SEs; an empty value means "use s"
    void set_original(const mat & value)
    {
        s_orig = value;
        is_orig_empty = s_orig.is_empty();
    }

    // return the original SEs, falling back to s when none were stored
    mat get_original() const
    {
        if (is_orig_empty) return (s);
        else return (s_orig);
    }

    mat get() const { return (s_alpha); }

private:
    mat s;        // working SE matrix
    mat s_orig;   // original SE matrix (may be empty)
    mat s_alpha;  // alpha-scaled SE matrix
    bool is_orig_empty;  // cached s_orig.is_empty()
};

// FUNCTION DECLARATIONS
// ---------------------
int mash_compute_posterior(const mat& b_mat, const SE& s_obj, const mat& v_mat,
                           const mat& l_mat, const mat& a_mat,
                           const cube& U_cube, const cube& Vinv_cube,
                           const cube& U0_cube, mat& post_mean, mat& post_var,
                           mat& neg_prob, mat& zero_prob, cube& post_cov,
                           const mat& posterior_weights,
                           const int& report_type);

int mash_compute_posterior_comcov(const mat& b_mat, const SE & s_obj,
                                  const mat & v_mat, const mat & l_mat,
                                  const mat & a_mat, const cube & U_cube,
                                  const cube & Vinv_cube, const cube & U0_cube,
                                  mat & post_mean, mat & post_var,
                                  mat & neg_prob, mat & zero_prob,
                                  cube & post_cov,
                                  const mat & posterior_weights,
                                  const int & report_type);

int mvsermix_compute_posterior(const mat& b_mat, const mat & s_mat,
                               mat & v_mat, cube & U_cube, cube & Vinv_cube,
                               cube & U0_cube, cube & Uinv_cube,
                               mat & post_mean, mat & post_var,
                               mat & neg_prob, mat & zero_prob,
                               cube & post_cov, vec & prior_scalar,
                               const mat & posterior_weights,
                               const mat & posterior_variable_weights);

int mvsermix_compute_posterior_comcov(const mat& b_mat, const mat & s_mat,
                                      const mat & v_mat, const cube & U_cube,
                                      const cube & Vinv_cube,
                                      const cube & U0_cube,
                                      const cube & Uinv_cube, mat & post_mean,
                                      mat & post_var, mat & neg_prob,
                                      mat & zero_prob, cube & post_cov,
                                      vec & prior_scalar,
                                      const mat & posterior_weights,
                                      const mat & posterior_variable_weights);

// POSTERIORMASH CLASS
// -------------------
// @param b_mat R by J
// @param s_mat R by J
// @param s_orig_mat R by J
// @param s_alpha_mat R by J
// @param v_mat R by R
// @param l_mat R by R for the common baseline application (@Yuxin Zou)
// @param a_mat Q by R for the common baseline application (@Yuxin Zou)
// @param U_cube list of prior covariance matrices, for each mixture component P by R by R
class PosteriorMASH
{
public:
    PosteriorMASH(const mat & b_mat,
                  const mat & s_mat,
                  const mat & s_alpha_mat,
                  const mat & s_orig_mat,
                  const mat & v_mat,
                  const mat & l_mat,
                  const mat & a_mat,
                  const cube & U_cube) :
        b_mat(b_mat), v_mat(v_mat), l_mat(l_mat), a_mat(a_mat), U_cube(U_cube)
    {
        int J = b_mat.n_cols, R = b_mat.n_rows;

        // empty s_mat means unit standard errors
        if (s_mat.is_empty()) s_obj.set(R, J);
        else s_obj.set(s_mat, s_alpha_mat);
        s_obj.set_original(s_orig_mat);
        // with a transformation A, outputs live in Q = a_mat.n_rows conditions
        if (!a_mat.is_empty()) {
            R = a_mat.n_rows;
        }
        post_mean.set_size(R, J);
        post_var.set_size(R, J);
        post_cov.set_size(R, R, J);
        neg_prob.set_size(R, J);
        zero_prob.set_size(R, J);
        post_mean.zeros();
        post_var.zeros();
        post_cov.zeros();
        neg_prob.zeros();
        zero_prob.zeros();
#ifdef _OPENMP
        // default to single-threaded; callers opt in via set_thread()
        omp_set_num_threads(1);
#endif
    }

    ~PosteriorMASH(){ }

    // @title Compute posterior matrices
    // @description More detailed description of function goes here.
    // @param posterior_weights P X J matrix, the posterior probabilities of each mixture component for each effect
    // @param report_type an integer: 1 for posterior mean only, 2 for posterior second moment, 3 for default mash output, 4 for additionally posterior covariance
    int compute_posterior(const mat & posterior_weights, const int & report_type)
    {
        return mash_compute_posterior(b_mat, s_obj, v_mat, l_mat, a_mat,
                                      U_cube, Vinv_cube, U0_cube, post_mean,
                                      post_var, neg_prob, zero_prob, post_cov,
                                      posterior_weights, report_type);
    }

    // @title Compute posterior matrices when covariance SVS is the same for all J conditions
    // @description More detailed description of function goes here.
    // @param posterior_weights P X J matrix, the posterior probabilities of each mixture component for each effect
    // @param report_type an integer: 1 for posterior mean only, 2 for posterior second moment, 3 for default mash output, 4 for additionally posterior covariance
    int compute_posterior_comcov(const mat & posterior_weights,
                                 const int & report_type)
    {
        return mash_compute_posterior_comcov(b_mat, s_obj, v_mat, l_mat,
                                             a_mat, U_cube, Vinv_cube,
                                             U0_cube, post_mean, post_var,
                                             neg_prob, zero_prob, post_cov,
                                             posterior_weights, report_type);
    } // compute_posterior_comcov

    // initializing some optionally precomputed quantities
    int set_vinv(const cube & value)
    {
        Vinv_cube = value;
        return 0;
    }

    int set_U0(const cube & value)
    {
        U0_cube = value;
        return 0;
    }

    int set_thread(const int & value)
    {
#ifdef _OPENMP
        omp_set_num_threads(value);
#endif
        return 0;
    }

    // @return PosteriorMean JxR matrix of posterior means
    // @return PosteriorSD JxR matrix of posterior (marginal) standard deviations
    // @return NegativeProb JxR matrix of posterior (marginal) probability of being negative
    // @return ZeroProb JxR matrix of posterior (marginal) probability of being zero
    mat PosteriorMean(){ return post_mean.t(); }

    mat PosteriorSD(){ return sqrt(post_var).t(); }

    cube PosteriorCov(){ return post_cov; }

    mat NegativeProb(){ return neg_prob.t(); }

    mat ZeroProb(){ return zero_prob.t(); }

private:
    // input
    mat b_mat;
    SE s_obj;
    mat v_mat;
    mat l_mat;
    mat a_mat;
    cube U_cube;
    cube Vinv_cube;  // optional precomputed per-effect inverse covariances
    cube U0_cube;    // optional precomputed posterior covariances
    // output
    // all R X J mat
    mat post_mean;
    mat post_var;
    mat neg_prob;
    mat zero_prob;
    // J X R X R cube
    cube post_cov;
};

// POSTERIORASH CLASS
// ------------------
// @param b_vec of J
// @param s_vec of J
// @param s_alpha_vec of J
// @param v double
// @param U_vec of P
class PosteriorASH
{
public:
    PosteriorASH(const vec & b_vec,
                 const vec & s_vec,
                 const vec & s_alpha,
                 double v,
                 const vec & U_vec) :
        b_vec(b_vec), s_vec(s_vec), v(v), U_vec(U_vec)
    {
        int J = b_vec.n_elem;

        // empty s_alpha means all-ones scaling
        if (s_alpha.is_empty()) s_alpha_vec.ones(J);
        else s_alpha_vec = s_alpha;
        post_mean.set_size(J);
        post_var.set_size(J);
        neg_prob.set_size(J);
        zero_prob.set_size(J);
    }

    ~PosteriorASH(){ }

    // @title Compute posterior matrices
    // @description univariate version of PosteriorMASH::compute_posterior(), same logic
    // @param posterior_weights P X J matrix, the posterior probabilities of each mixture component for each effect
    int compute_posterior(const mat & posterior_weights)
    {
        vec vinv = 1 / (s_vec % s_vec * v);  // element-wise 1 / variance
        unsigned J = b_vec.n_elem;
        unsigned P = U_vec.n_elem;
        vec mean(J, arma::fill::zeros);

        // J X P matrices
        mat mu1_mat(J, P, arma::fill::zeros);
        mat mu2_mat(J, P, arma::fill::zeros);
        mat zero_mat(J, P, arma::fill::zeros);
        mat neg_mat(J, P, arma::fill::zeros);

        for (uword p = 0; p < P; ++p) {
            // univariate posterior variance for prior component p
            vec U1 = U_vec.at(p) / (vinv * U_vec.at(p) + 1.0);
            mu1_mat.col(p) = U1 % vinv % b_vec % s_alpha_vec;
            U1 = U1 % (s_alpha_vec % s_alpha_vec);
            mu2_mat.col(p) = pow(mu1_mat.col(p), 2.0) + U1;
            vec sigma = sqrt(U1);
            neg_mat.col(p) = pnorm(mu1_mat.col(p), mean, sigma);
            // zero posterior variance: effect is exactly zero, not negative
            for (uword j = 0; j < J; ++j) {
                if (U1.at(j) == 0) {
                    zero_mat.at(j, p) = 1.0;
                    neg_mat.at(j, p) = 0.0;
                }
            }
        }
        // compute weighted means of posterior arrays
        for (uword j = 0; j < J; ++j) {
            post_mean.at(j) = dot(mu1_mat.row(j), posterior_weights.col(j));
            post_var.at(j) = dot(mu2_mat.row(j), posterior_weights.col(j));
            neg_prob.at(j) = dot(neg_mat.row(j), posterior_weights.col(j));
            zero_prob.at(j) = dot(zero_mat.row(j), posterior_weights.col(j));
        }
        // convert mixed second moment to variance
        post_var -= pow(post_mean, 2.0);
        return 0;
    } // compute_posterior

    // @return PosteriorMean J vec of posterior means
    // @return PosteriorSD J vec of posterior (marginal) standard deviations
    // @return NegativeProb J vec of posterior (marginal) probability of being negative
    // @return ZeroProb J vec of posterior (marginal) probability of being zero
    vec PosteriorMean(){ return post_mean; }

    vec PosteriorSD(){ return sqrt(post_var); }

    vec PosteriorCov(){ return post_var; }

    vec NegativeProb(){ return neg_prob; }

    vec ZeroProb(){ return zero_prob; }

private:
    // input of J vecs
    vec b_vec;
    vec s_vec;
    vec s_alpha_vec;
    double v;
    vec U_vec;
    // output of J vecs
    vec post_mean;
    vec post_var;
    vec neg_prob;
    vec zero_prob;
};

// MVSERMIX CLASS
// --------------
// @title Inferences for Multivariate Single Effect Regression with Mixture prior
// @param b_mat R by J
// @param s_mat R by J
// @param v_mat R by R
// @param U_cube list of prior covariance matrices, for each mixture component P by R by R
class MVSERMix
{
public:
    MVSERMix(const mat & b_mat, const mat & s_mat, const mat & v_mat,
             const cube & U_cube) :
        b_mat(b_mat), s_mat(s_mat), v_mat(v_mat), U_cube(U_cube)
    {
        int J = b_mat.n_cols, R = b_mat.n_rows;

        post_mean.set_size(R, J);
        post_var.set_size(R, J);
        post_cov.set_size(R, R, J);
        neg_prob.set_size(R, J);
        zero_prob.set_size(R, J);
        post_mean.zeros();
        post_var.zeros();
        post_cov.zeros();
        neg_prob.zeros();
        zero_prob.zeros();
        prior_scalar.set_size(U_cube.n_slices);
#ifdef _OPENMP
        // default to single-threaded; callers opt in via set_thread()
        omp_set_num_threads(1);
#endif
    }

    ~MVSERMix(){ }

    // @title Compute posterior matrices and EM updates for prior scalar estimate
    // @description Make posterior inferences, and also perform the EM update for prior scalar, for mvSuSiE model.
    // @param posterior_weights P X J matrix, the posterior probabilities of each mixture component for each effect.
    // @param posterior_variable_weights P X J matrix, the posterior inclusion probabilities of each effect in a single-effect model.
    // posterior_variable_weights is only relevant when EM updates for prior scalar is needed.
    int compute_posterior(const mat & posterior_weights,
                          const mat & posterior_variable_weights)
    {
        return mvsermix_compute_posterior(b_mat, s_mat, v_mat, U_cube,
                                          Vinv_cube, U0_cube, Uinv_cube,
                                          post_mean, post_var, neg_prob,
                                          zero_prob, post_cov, prior_scalar,
                                          posterior_weights,
                                          posterior_variable_weights);
    } // compute_posterior

    // @title Compute posterior matrices when covariance SVS is the same for all J conditions
    // @description More detailed description of function goes here.
    // @param posterior_weights P X J matrix, the posterior probabilities of each mixture component for each effect
    int compute_posterior_comcov(const mat & posterior_weights,
                                 const mat & posterior_variable_weights)
    {
        return mvsermix_compute_posterior_comcov(b_mat, s_mat, v_mat, U_cube,
                                                 Vinv_cube, U0_cube,
                                                 Uinv_cube, post_mean,
                                                 post_var, neg_prob,
                                                 zero_prob, post_cov,
                                                 prior_scalar,
                                                 posterior_weights,
                                                 posterior_variable_weights);
    } // compute_posterior_comcov

    // initializing some optionally precomputed quantities
    int set_Vinv(const cube & value)
    {
        Vinv_cube = value;
        return 0;
    }

    int set_U0(const cube & value)
    {
        U0_cube = value;
        return 0;
    }

    int set_Uinv(const cube & value)
    {
        Uinv_cube = value;
        return 0;
    }

    int set_thread(const int & value)
    {
#ifdef _OPENMP
        omp_set_num_threads(value);
#endif
        return 0;
    }

    // @return PosteriorMean JxR matrix of posterior means
    // @return PosteriorSD JxR matrix of posterior (marginal) standard deviations
    // @return NegativeProb JxR matrix of posterior (marginal) probability of being negative
    // @return ZeroProb JxR matrix of posterior (marginal) probability of being zero
    mat PosteriorMean(){ return post_mean.t(); }

    mat PosteriorSD(){ return sqrt(post_var).t(); }

    cube PosteriorCov(){ return post_cov; }

    mat NegativeProb(){ return neg_prob.t(); }

    mat ZeroProb(){ return zero_prob.t(); }

    vec PriorScalar(){ return prior_scalar; }

private:
    // input
    mat b_mat;
    mat s_mat;
    mat v_mat;
    cube U_cube;
    cube Vinv_cube;  // optional precomputed per-effect inverse covariances
    cube U0_cube;    // optional precomputed posterior covariances
    cube Uinv_cube;  // optional precomputed prior inverses, for prior_scalar EM
    // output
    // all R X J mat
    mat post_mean;
    mat post_var;
    mat neg_prob;
    mat zero_prob;
    // J X R X R cube
    cube post_cov;
    // P vector of scalars
    vec prior_scalar;
};

// Softmax functions: yi = exp(xi) / sum(exp(xj))
inline vec softmax(const vec & x)
{
    // Calculate exp()
    // Subtract the max - this prevents overflow, which happens for x ~ 1000
    vec y = exp(x - max(x));

    // Renormalise
    y /= sum(y);
    return y;
}

// function for "shrinking" the covariance matrix, to get $\hat U_k$.
// eigenvalues <= 1 are lifted to (1 + eps); eigenvectors are kept
inline mat shrink_cov(const mat & V, const double & eps)
{
    vec eigval;
    mat eigvec;

    eig_sym(eigval, eigvec, V);
    for (uword i = 0; i < eigval.n_elem; ++i) {
        eigval(i) = (eigval(i) > 1.0) ? eigval(i) : (1.0 + eps);
    }
    return eigvec * diagmat(eigval) * trans(eigvec);
}

// TEEM CLASS
// ----------
// @title Truncated Eigenvalue Extreme deconvolution
// @description ...
// @param X
// @param w
// @param U
// @param maxiter
// @param tol
// @param verbose
class TEEM
{
public:
    // internally the class works on T_k = U_k + I; get_U() undoes the shift
    TEEM(const mat & X_mat,
         const vec & w_vec,
         const cube & U_cube) :
        X_mat(X_mat), w_vec(w_vec)
    {
        T_cube = U_cube;
        for (unsigned j = 0; j < T_cube.n_slices; ++j) {
            T_cube.slice(j) += eye(size(T_cube.slice(j)));
        }
    }

    ~TEEM(){ }

    vec get_objective() const { return objective; }

    vec get_maxd() const { return maxd; }

    vec get_w() const { return w_vec; }

    // recover U_k = T_k - I
    cube get_U() const
    {
        cube U_cube = T_cube;

        for (unsigned j = 0; j < U_cube.n_slices; ++j) {
            U_cube.slice(j) -= eye(size(U_cube.slice(j)));
        }
        return U_cube;
    }

    // EM fit; stops when the max change in mixture weights drops below
    // converge_tol or after maxiter iterations.
    // NOTE(review): `verbose` is accepted but never read in this body.
    int fit(const int & maxiter, const double & converge_tol,
            const double & eigen_tol, const bool & verbose)
    {
        // initialize to store progress
        objective.zeros(maxiter);
        maxd.zeros(maxiter);
        int iter_out = 0;

        // Get the number of samples (n) and the number of mixture components (k)
        unsigned int n = X_mat.n_rows;
        unsigned int k = w_vec.size();

        for (unsigned int iter = 0; iter < (unsigned int) maxiter; ++iter) {
            // store parameters and likelihood in the previous step
            vec w0_vec = w_vec;

            // E-step: calculate posterior probabilities using the current mu and sigmas
            mat logP = zeros<mat>(n, k); // n by k matrix
            for (unsigned j = 0; j < k; ++j) {
                logP.col(j) = log(w_vec(j)) +
                              dmvnorm_mat(trans(X_mat),
                                          zeros<vec>(X_mat.n_cols),
                                          T_cube.slice(j), true); // ??
            }
            // softmax for renormalization
            mat P_mat = zeros<mat>(k, n); // k by n matrix. because of row/col vec converting
            for (uword i = 0; i < n; ++i) {
                colvec y = arma::conv_to<colvec>::from(logP.row(i));
                P_mat.col(i) = softmax(y);
            }
            P_mat = trans(P_mat); // n by k matrix

            // M-step: responsibility-weighted scatter, then eigenvalue shrinkage
            for (unsigned int j = 0; j < k; ++j) {
                T_cube.slice(j) = trans(X_mat) *
                                  (P_mat.col(j) % X_mat.each_col()) /
                                  accu(P_mat.col(j));
                T_cube.slice(j) = shrink_cov(T_cube.slice(j), eigen_tol);
            }
            // update mixture weights
            w_vec = arma::conv_to<colvec>::from(sum(P_mat, 0)) / n; // 0:sum by column;

            // Compute log-likelihood at the current estimates
            double f = compute_loglik();

            // Check stopping criterion
            double d = max(abs(w_vec - w0_vec));
            maxd(iter) = d;
            objective(iter) = f;
            iter_out = iter;
            if (d < converge_tol) {
                break;
            }
        }
        // trim the progress vectors to the iterations actually run
        objective.resize(iter_out + 1);
        maxd.resize(iter_out + 1);
        return 0;
    } // fit

private:
    mat X_mat;
    vec w_vec;
    cube T_cube;   // per-component covariances, shifted by +I
    vec objective; // log-likelihood per iteration
    vec maxd;      // max weight change per iteration

    // mixture log-likelihood of all rows of X under the current parameters
    double compute_loglik()
    {
        unsigned int n = X_mat.n_rows;
        unsigned int k = w_vec.size();
        vec y = zeros<vec>(n);

        for (unsigned int j = 0; j < k; ++j) {
            y = y + w_vec(j) * dmvnorm_mat(trans(X_mat),
                                           zeros<vec>(X_mat.n_cols),
                                           T_cube.slice(j));
        }
        return (sum(log(y)));
    }
};

// FUNCTION DEFINITIONS
// --------------------

// @title calc_lik
// @description computes matrix of likelihoods for each of J cols of Bhat for each of P prior covariances
// @param b_mat R by J
// @param s_mat R by J
// @param v_mat R by R
// @param l_mat R by R for the common baseline application (@Yuxin Zou)
// @param U_cube list of prior covariance matrices
// @param sigma_cube list of sigma which is result of get_cov(s_mat, v_mat, l_mat)
// @param logd if true computes log-likelihood
// @param common_cov if true use version for common covariance
// @return J x P matrix of multivariate normal likelihoods, p(bhat | U[p], V)
mat calc_lik(const mat & b_mat,
             const mat & s_mat,
             const mat & v_mat,
             const mat & l_mat,
             const cube & U_cube,
             const cube & sigma_cube,
             bool logd,
             bool common_cov,
             int n_thread = 1)
{
    // In armadillo data are stored with column-major ordering
    // slicing columns are therefore faster than rows
    // lik is a J by P matrix
    mat lik(b_mat.n_cols, U_cube.n_slices, arma::fill::zeros);
    vec mean(b_mat.n_rows, arma::fill::zeros);
    mat sigma;

#ifdef _OPENMP
    omp_set_num_threads(n_thread);
#endif
    if (common_cov) {
        // one error covariance shared across all J effects
        if (!sigma_cube.is_empty()) sigma = sigma_cube.slice(0);
        else sigma = get_cov(s_mat.col(0), v_mat, l_mat);
#pragma omp parallel for default(none) schedule(static) shared(lik, U_cube, mean, sigma, logd, b_mat)
        for (uword p = 0; p < lik.n_cols; ++p) {
            lik.col(p) = dmvnorm_mat(b_mat, mean, sigma + U_cube.slice(p), logd);
        }
    } else {
#pragma \
    omp parallel for default(none) schedule(static) shared(lik, mean, logd, U_cube, b_mat, sigma_cube, l_mat, v_mat, s_mat) private(sigma)
        for (uword j = 0; j < lik.n_rows; ++j) {
            // per-effect error covariance
            if (!sigma_cube.is_empty()) sigma = sigma_cube.slice(j);
            else sigma = get_cov(s_mat.col(j), v_mat, l_mat);
            for (uword p = 0; p < lik.n_cols; ++p) {
                lik.at(j, p) = dmvnorm(b_mat.col(j), mean,
                                       sigma + U_cube.slice(p), logd);
            }
        }
    }
    return lik;
}

// @title calc_lik multivariate common cov version with sigma inverse precomputed
// @description computes matrix of likelihoods for each of J cols of Bhat for each of P prior covariances
// @param b_mat R by J
// @param rooti_cube R by R by P, or R by R by J by P, if common_cov is False
// @param logd if true computes log-likelihood
// @param common_cov if true use version for common covariance
// @return J x P matrix of multivariate normal likelihoods, p(bhat | U[p], V)
mat calc_lik(const mat & b_mat,
             const cube & rooti_cube,
             bool logd,
             bool common_cov,
             int n_thread = 1)
{
#ifdef _OPENMP
    omp_set_num_threads(n_thread);
#endif
    // In armadillo data are stored with column-major ordering
    // slicing columns are therefore faster than rows
    // lik is a J by P matrix
    int P;

    if (common_cov) P = rooti_cube.n_slices;
    else P = rooti_cube.n_slices / b_mat.n_cols;  // flattened J x P layout
    mat lik(b_mat.n_cols, P, arma::fill::zeros);
    vec mean(b_mat.n_rows, arma::fill::zeros);

    if (common_cov) {
#pragma omp parallel for default(none) schedule(static) shared(lik, mean, logd, rooti_cube, b_mat)
        for (uword p = 0; p < lik.n_cols; ++p) {
            lik.col(p) = dmvnorm_mat(b_mat, mean, rooti_cube.slice(p), logd, true);
        }
    } else {
#pragma omp parallel for default(none) schedule(static) shared(lik, mean, logd, rooti_cube, b_mat)
        for (uword j = 0; j < lik.n_rows; ++j) {
            for (uword p = 0; p < lik.n_cols; ++p) {
                // slice index j * P + p in the flattened (J x P) cube
                lik.at(j, p) = dmvnorm(b_mat.col(j), mean,
                                       rooti_cube.slice(j * lik.n_cols + p),
                                       logd, true);
            }
        }
    }
    return lik;
}

// @title calc_lik univariate version
// @description computes matrix of likelihoods for each of J cols of Bhat for each of P prior sigma
// @param b_vec of J
// @param s_vec of J
// @param v numeric
// @param U_vec P vector
// @param logd if true computes log-likelihood
// @return J x P matrix of multivariate normal likelihoods, p(bhat | U[p], V)
mat calc_lik(const vec & b_vec,
             const vec & s_vec,
             double v,
             const vec & U_vec,
             bool logd)
{
    mat lik(b_vec.n_elem, U_vec.n_elem, arma::fill::zeros);
    vec sigma = s_vec % s_vec * v;
    vec mean(b_vec.n_elem, arma::fill::zeros);

    for (uword p = 0; p < lik.n_cols; ++p) {
        lik.col(p) = dnorm(b_vec, mean, sigma + U_vec.at(p), logd);
    }
    return lik;
}

// This implements the core part of the compute_posterior method in
// the PosteriorMASH class.
// Core of PosteriorMASH::compute_posterior: for each effect j, mix the
// per-component posteriors (mean, 2nd moment, sign/zero probabilities)
// with posterior_weights. Outputs are written into post_* in place.
int mash_compute_posterior(const mat& b_mat, const SE& s_obj, const mat& v_mat,
                           const mat& l_mat, const mat& a_mat,
                           const cube& U_cube, const cube& Vinv_cube,
                           const cube& U0_cube, mat& post_mean, mat& post_var,
                           mat& neg_prob, mat& zero_prob, cube& post_cov,
                           const mat& posterior_weights,
                           const int& report_type)
{
    vec mean(post_mean.n_rows);

    mean.fill(0);
#pragma \
    omp parallel for schedule(static) default(none) shared(posterior_weights, report_type, mean, post_mean, post_var, neg_prob, zero_prob, post_cov, b_mat, s_obj, l_mat, v_mat, a_mat, U_cube, Vinv_cube, U0_cube)
    for (uword j = 0; j < post_mean.n_cols; ++j) {
        // FIXME: improved math may help here
        // per-effect inverse error covariance, precomputed when available
        mat Vinv_j;
        if (Vinv_cube.is_empty()) Vinv_j = inv_sympd(get_cov(s_obj.get_original().col(j), v_mat, l_mat));
        else Vinv_j = Vinv_cube.slice(j);
        // R X P matrices
        mat mu1_mat(post_mean.n_rows, U_cube.n_slices);
        mat diag_mu2_mat(post_mean.n_rows, U_cube.n_slices);
        mat zero_mat(post_mean.n_rows, U_cube.n_slices);
        mat neg_mat(post_mean.n_rows, U_cube.n_slices);
        mu1_mat.fill(0);
        diag_mu2_mat.fill(0);
        zero_mat.fill(0);
        for (uword p = 0; p < U_cube.n_slices; ++p) {
            //
            mat U1(post_mean.n_rows, post_mean.n_rows);
            mat U0;
            U1.fill(0);
            // posterior covariance for component p, precomputed when available
            if (U0_cube.is_empty()) U0 = get_posterior_cov(Vinv_j, U_cube.slice(p));
            else U0 = U0_cube.slice(j * U_cube.n_slices + p);
            if (a_mat.is_empty()) {
                // rescale by s_alpha; U1 is the rescaled posterior covariance
                mu1_mat.col(p) = get_posterior_mean(b_mat.col(j), Vinv_j, U0) %
                                 s_obj.get().col(j);
                U1 = (U0.each_col() % s_obj.get().col(j)).each_row() %
                     s_obj.get().col(j).t();
            } else {
                // common-baseline case: additionally transform by A
                mu1_mat.col(p) = a_mat *
                                 (get_posterior_mean(b_mat.col(j), Vinv_j, U0) %
                                  s_obj.get().col(j));
                U1 = a_mat *
                     (((U0.each_col() % s_obj.get().col(j)).each_row() %
                       s_obj.get().col(j).t()) * a_mat.t());
            }
            if (report_type == 2 || report_type == 4) {
                // accumulate weighted posterior 2nd moment
                post_cov.slice(j) += posterior_weights.at(p, j) *
                                     (U1 + mu1_mat.col(p) * mu1_mat.col(p).t());
            }
            vec sigma = sqrt(U1.diag()); // U1.diag() is the posterior covariance
            diag_mu2_mat.col(p) = pow(mu1_mat.col(p), 2.0) + U1.diag();
            neg_mat.col(p) = pnorm(mu1_mat.col(p), mean, sigma);
            // zero posterior sd: the effect is exactly zero, not negative
            for (uword r = 0; r < sigma.n_elem; ++r) {
                if (sigma.at(r) == 0) {
                    zero_mat.at(r, p) = 1.0;
                    neg_mat.at(r, p) = 0.0;
                }
            }
        }
        // compute weighted means of posterior arrays
        post_mean.col(j) = mu1_mat * posterior_weights.col(j);
        post_var.col(j) = diag_mu2_mat * posterior_weights.col(j);
        neg_prob.col(j) = neg_mat * posterior_weights.col(j);
        zero_prob.col(j) = zero_mat * posterior_weights.col(j);
        // center 2nd moment into covariance (condition intentionally disabled)
        // if (report_type == 4)
        post_cov.slice(j) -= post_mean.col(j) * post_mean.col(j).t();
    }
    // convert mixed second moment to variance
    post_var -= pow(post_mean, 2.0);
    return 0;
} // mash_compute_posterior

// This implements the core part of the compute_posterior_comcov method in
// the PosteriorMASH class.
// Same outputs as mash_compute_posterior, but assumes the error covariance
// is common across all J effects, so it parallelizes over components p and
// accumulates into the shared outputs under a critical section.
int mash_compute_posterior_comcov(const mat& b_mat, const SE & s_obj,
                                  const mat & v_mat, const mat & l_mat,
                                  const mat & a_mat, const cube & U_cube,
                                  const cube & Vinv_cube, const cube & U0_cube,
                                  mat & post_mean, mat & post_var,
                                  mat & neg_prob, mat & zero_prob,
                                  cube & post_cov,
                                  const mat & posterior_weights,
                                  const int & report_type)
{
    mat mean(post_mean.n_rows, post_mean.n_cols);

    mean.fill(0);
    // R X R
    mat Vinv;
    // common covariance: column 0 stands in for every effect
    if (Vinv_cube.is_empty()) Vinv = inv_sympd(get_cov(s_obj.get_original().col(0), v_mat, l_mat));
    else Vinv = Vinv_cube.slice(0);
    rowvec ones(post_mean.n_cols);
    rowvec zeros(post_mean.n_cols);
    ones.fill(1);
    zeros.fill(0);
#pragma \
    omp parallel for schedule(static) default(none) shared(posterior_weights, report_type, mean, Vinv, ones, zeros, post_mean, post_var, neg_prob, zero_prob, post_cov, b_mat, s_obj, a_mat, U_cube, U0_cube)
    for (uword p = 0; p < U_cube.n_slices; ++p) {
        mat zero_mat(post_mean.n_rows, post_mean.n_cols);
        // R X R
        mat U1(post_mean.n_rows, post_mean.n_rows);
        // R X J
        mat mu1_mat(post_mean.n_rows, post_mean.n_cols);
        mat U0;
        zero_mat.fill(0);
        U1.fill(0);
        mu1_mat.fill(0);
        if (U0_cube.is_empty()) U0 = get_posterior_cov(Vinv, U_cube.slice(p));
        else U0 = U0_cube.slice(p);
        if (a_mat.is_empty()) {
            mu1_mat = get_posterior_mean_mat(b_mat, Vinv, U0) % s_obj.get();
            U1 = (U0.each_col() % s_obj.get().col(0)).each_row() %
                 s_obj.get().col(0).t();
        } else {
            // common-baseline case: additionally transform by A
            mu1_mat = a_mat * (get_posterior_mean_mat(b_mat, Vinv, U0) % s_obj.get());
            U1 = a_mat *
                 (((U0.each_col() % s_obj.get().col(0)).each_row() %
                   s_obj.get().col(0).t()) * a_mat.t());
        }
        // R X J
        mat diag_mu2_mat = pow(mu1_mat, 2.0);
        diag_mu2_mat.each_col() += U1.diag();
        // R X J
        // FIXME: any better way to init sigma?
        mat sigma(post_mean.n_rows, post_mean.n_cols);
        sigma.fill(0);
        sigma.each_col() += sqrt(U1.diag()); // U1.diag() is the posterior covariance
        mat neg_mat = pnorm(mu1_mat, mean, sigma);
        // zero posterior sd: the whole row is exactly zero, not negative
        for (uword r = 0; r < sigma.n_rows; ++r) {
            if (sigma.at(r, 0) == 0) {
                zero_mat.row(r) = ones;
                neg_mat.row(r) = zeros;
            }
        }
        // compute weighted means of posterior arrays
#pragma omp critical
        {
            post_mean += mu1_mat.each_row() % posterior_weights.row(p);
            post_var += diag_mu2_mat.each_row() % posterior_weights.row(p);
            neg_prob += neg_mat.each_row() % posterior_weights.row(p);
            zero_prob += zero_mat.each_row() % posterior_weights.row(p);
            if (report_type == 2 || report_type == 4) {
                for (uword j = 0; j < post_mean.n_cols; ++j) {
                    post_cov.slice(j) += posterior_weights.at(p, j) *
                                         (U1 + mu1_mat.col(j) * mu1_mat.col(j).t());
                }
            }
        }
    }
    // convert mixed second moment to variance
    post_var -= pow(post_mean, 2.0);
    // center 2nd moment into covariance (condition intentionally disabled)
    // if (report_type == 4)
    {
#pragma omp parallel for schedule(static) default(none) shared(post_cov, post_mean)
        for (uword j = 0; j < post_mean.n_cols; ++j) {
            post_cov.slice(j) -= post_mean.col(j) * post_mean.col(j).t();
        }
    }
    return 0;
} // mash_compute_posterior_comcov

// This implements the core part of the compute_posterior method in
// the MVSERMix class.
// Computes, for each variable j (column of b_mat), the mixture posterior over
// P prior covariance components: posterior means, variances, sign/zero
// probabilities and per-variable posterior covariances. When
// posterior_variable_weights is non-empty it additionally accumulates the
// weighted 2nd-moment matrices needed for the EM update of the prior scalar
// (for the mmbr package). Outputs post_mean/post_var/neg_prob/zero_prob
// (R x J), post_cov (R x R x J, accumulated with += — caller presumably
// zero-initializes; confirm) and prior_scalar (length P).
int mvsermix_compute_posterior(const mat& b_mat,
                               const mat & s_mat,
                               mat & v_mat,
                               cube & U_cube,
                               cube & Vinv_cube,
                               cube & U0_cube,
                               cube & Uinv_cube,
                               mat & post_mean,
                               mat & post_var,
                               mat & neg_prob,
                               mat & zero_prob,
                               cube & post_cov,
                               vec & prior_scalar,
                               const mat & posterior_weights,
                               const mat & posterior_variable_weights)
{
  vec mean(post_mean.n_rows);
  mean.fill(0);
  // This is meant to store a length P of 2nd moment matrices,
  // each element is \sum_j posterior_{p,j} * mu2_{p,j}
  cube Eb2_cube;
  bool to_estimate_prior = !posterior_variable_weights.is_empty();
  if (to_estimate_prior) {
    // we will compute the EM update for prior scalar here
    // for use with mmbr package
    Eb2_cube.set_size(post_mean.n_rows, post_mean.n_rows, U_cube.n_slices);
    Eb2_cube.zeros();
  }
  // Parallel over variables j: each iteration writes only column/slice j of
  // the shared outputs, so no synchronization is needed except for Eb2_cube.
#pragma \
  omp parallel for schedule(static) default(none) shared(posterior_weights, posterior_variable_weights, to_estimate_prior, mean, Eb2_cube, post_mean, post_var, neg_prob, zero_prob, post_cov, prior_scalar, b_mat, s_mat, v_mat, U_cube, Vinv_cube, U0_cube, Uinv_cube)
  for (uword j = 0; j < post_mean.n_cols; ++j) {
    // FIXME: improved math may help here
    // Per-variable inverse error covariance (each variable has its own
    // standard errors, unlike the "comcov" variant).
    mat Vinv_j;
    if (Vinv_cube.is_empty()) Vinv_j = inv_sympd(get_cov(s_mat.col(j), v_mat));
    else Vinv_j = Vinv_cube.slice(j);
    // R X P matrices
    mat mu1_mat(post_mean.n_rows, U_cube.n_slices);
    mat diag_mu2_mat(post_mean.n_rows, U_cube.n_slices);
    mat zero_mat(post_mean.n_rows, U_cube.n_slices);
    mat neg_mat(post_mean.n_rows, U_cube.n_slices);
    mu1_mat.fill(0);
    diag_mu2_mat.fill(0);
    zero_mat.fill(0);
    neg_mat.fill(0);
    // R X R X P
    cube mu2_cube;
    mu2_cube.set_size(post_mean.n_rows, post_mean.n_rows, U_cube.n_slices);
    for (uword p = 0; p < U_cube.n_slices; ++p) {
      mat U1;
      // Posterior covariance for (j, p): precomputed U0_cube is laid out
      // variable-major (slice index j * P + p).
      if (U0_cube.is_empty()) U1 = get_posterior_cov(Vinv_j, U_cube.slice(p));
      else U1 = U0_cube.slice(j * U_cube.n_slices + p);
      mu1_mat.col(p) = get_posterior_mean(b_mat.col(j), Vinv_j, U1);
      // this is posterior 2nd moment for the j-th variable and the p-th prior
      mu2_cube.slice(p) = U1 + mu1_mat.col(p) * mu1_mat.col(p).t();
      // add to posterior 2nd moment contribution of the p-th component
      post_cov.slice(j) += posterior_weights.at(p, j) * mu2_cube.slice(p);
      vec sigma = sqrt(U1.diag()); // U1.diag() is the posterior covariance
      diag_mu2_mat.col(p) = pow(mu1_mat.col(p), 2.0) + U1.diag();
      neg_mat.col(p) = pnorm(mu1_mat.col(p), mean, sigma);
      // Zero posterior sd => point mass at zero for that coordinate.
      for (uword r = 0; r < sigma.n_elem; ++r) {
        if (sigma.at(r) == 0) {
          zero_mat.at(r, p) = 1.0;
          neg_mat.at(r, p) = 0.0;
        }
      }
    }
    // compute weighted means of posterior arrays
    post_mean.col(j) = mu1_mat * posterior_weights.col(j);
    post_var.col(j) = diag_mu2_mat * posterior_weights.col(j);
    neg_prob.col(j) = neg_mat * posterior_weights.col(j);
    zero_prob.col(j) = zero_mat * posterior_weights.col(j);
    // 2nd moment -> covariance for variable j.
    post_cov.slice(j) -= post_mean.col(j) * post_mean.col(j).t();
    if (to_estimate_prior) {
      // Eb2_cube slices are shared across j-iterations, hence the critical.
#pragma omp critical
      {
        for (uword p = 0; p < U_cube.n_slices; ++p) {
          // we will compute some quantity to provide for
          // EM update for prior scalar in mmbr package
          // the M-step update is:
          // \sigma_0^2 = \sum_{p=1}^P p(\gamma_p) \mathrm{tr}(U_p^{-1} E[bb^T \,|\, \gamma_p])/r
          // where E[bb^T \,|\, \gamma_p] = \sum_j \alpha_{p,j} * mu2_mat_{p,j}
          Eb2_cube.slice(p) += posterior_variable_weights.at(p, j) * mu2_cube.slice(p);
        }
      }
    }
  }
  post_var -= pow(post_mean, 2.0);
  if (to_estimate_prior) {
    // now compute \mathrm{tr}(U_p^{-1} E[bb^T \,|\, \gamma_p])/r for each p
    // NOTE(review): the comment above mentions dividing by r, but the code
    // stores the raw trace — presumably the caller applies the /r; confirm.
    for (uword p = 0; p < U_cube.n_slices; ++p) {
      prior_scalar.at(p) = trace(Uinv_cube.slice(p) * Eb2_cube.slice(p));
    }
  }
  return 0;
} // mvsermix_compute_posterior

// This implements the core part of the compute_posterior_comcov method in
// the MVSERMix class.
// Common-covariance variant of mvsermix_compute_posterior(): one error
// covariance V (from column 0 of s_mat) is shared by all J variables, so the
// work is parallelized over the P prior components instead of over variables.
// Outputs are accumulated with += into post_mean/post_var/neg_prob/zero_prob
// (R x J), post_cov (R x R x J) and, when posterior_variable_weights is
// non-empty, prior_scalar (length P) for the EM prior-scalar update.
int mvsermix_compute_posterior_comcov(const mat& b_mat,
                                      const mat & s_mat,
                                      const mat & v_mat,
                                      const cube & U_cube,
                                      const cube & Vinv_cube,
                                      const cube & U0_cube,
                                      const cube & Uinv_cube,
                                      mat & post_mean,
                                      mat & post_var,
                                      mat & neg_prob,
                                      mat & zero_prob,
                                      cube & post_cov,
                                      vec & prior_scalar,
                                      const mat & posterior_weights,
                                      const mat & posterior_variable_weights)
{
  mat mean(post_mean.n_rows, post_mean.n_cols);
  mean.fill(0);
  // for Eb2_cube see compute_posterior() for detailed documentations.
  cube Eb2_cube;
  bool to_estimate_prior = !posterior_variable_weights.is_empty();
  if (to_estimate_prior) {
    Eb2_cube.set_size(post_mean.n_rows, post_mean.n_rows, U_cube.n_slices);
    Eb2_cube.zeros();
  }
  // R X R
  // Shared inverse error covariance for all variables.
  mat Vinv;
  if (Vinv_cube.is_empty()) Vinv = inv_sympd(get_cov(s_mat.col(0), v_mat));
  else Vinv = Vinv_cube.slice(0);
  rowvec ones(post_mean.n_cols);
  rowvec zeros(post_mean.n_cols);
  ones.fill(1);
  zeros.fill(0);
  // Parallel over components p. Eb2_cube.slice(p) and prior_scalar.at(p) are
  // touched only by iteration p, so they need no critical section; the
  // J-indexed shared outputs are updated inside the critical block below.
#pragma \
  omp parallel for schedule(static) default(none) shared(posterior_weights, posterior_variable_weights, to_estimate_prior, mean, Vinv, zeros, ones, Eb2_cube, post_mean, post_var, neg_prob, zero_prob, post_cov, prior_scalar, b_mat, U_cube, U0_cube, Uinv_cube)
  for (uword p = 0; p < U_cube.n_slices; ++p) {
    mat zero_mat(post_mean.n_rows, post_mean.n_cols);
    // R X R
    mat U1;
    // R X J
    mat mu1_mat;
    zero_mat.fill(0);
    // Posterior covariance for component p (shared across variables because
    // Vinv is shared).
    if (U0_cube.is_empty()) U1 = get_posterior_cov(Vinv, U_cube.slice(p));
    else U1 = U0_cube.slice(p);
    mu1_mat = get_posterior_mean_mat(b_mat, Vinv, U1);
    // Per-variable posterior 2nd moments for this component.
    cube mu2_cube;
    mu2_cube.set_size(post_mean.n_rows, post_mean.n_rows, post_mean.n_cols);
    for (uword j = 0; j < post_mean.n_cols; ++j) {
      mu2_cube.slice(j) = U1 + mu1_mat.col(j) * mu1_mat.col(j).t();
      if (to_estimate_prior) Eb2_cube.slice(p) += posterior_variable_weights.at(p, j) * mu2_cube.slice(j);
    }
    // See compute_posterior(): raw trace, without the /r normalization.
    if (to_estimate_prior) prior_scalar.at(p) = trace(Uinv_cube.slice(p) * Eb2_cube.slice(p));
    // R X J
    // Diagonal of the 2nd moment: mu1^2 + diag(U1), broadcast per column.
    mat diag_mu2_mat = pow(mu1_mat, 2.0);
    diag_mu2_mat.each_col() += U1.diag();
    // R X J
    // FIXME: any better way to init sigma?
    mat sigma(post_mean.n_rows, post_mean.n_cols);
    sigma.fill(0);
    sigma.each_col() += sqrt(U1.diag()); // U1.diag() is the posterior covariance
    mat neg_mat = pnorm(mu1_mat, mean, sigma);
    // Zero posterior sd => point mass at zero; sigma's columns are identical,
    // so testing column 0 suffices for the whole row.
    for (uword r = 0; r < sigma.n_rows; ++r) {
      if (sigma.at(r, 0) == 0) {
        zero_mat.row(r) = ones;
        neg_mat.row(r) = zeros;
      }
    }
#pragma omp critical
    {
      // compute weighted means of posterior arrays
      post_mean += mu1_mat.each_row() % posterior_weights.row(p);
      post_var += diag_mu2_mat.each_row() % posterior_weights.row(p);
      neg_prob += neg_mat.each_row() % posterior_weights.row(p);
      zero_prob += zero_mat.each_row() % posterior_weights.row(p);
      for (uword j = 0; j < post_mean.n_cols; ++j) {
        post_cov.slice(j) += posterior_weights.at(p, j) * mu2_cube.slice(j);
      }
    }
  }
  // 2nd moments -> variances/covariances.
  post_var -= pow(post_mean, 2.0);
#pragma omp parallel for schedule(static) default(none) shared(post_cov, post_mean)
  for (uword j = 0; j < post_mean.n_cols; ++j) {
    post_cov.slice(j) -= post_mean.col(j) * post_mean.col(j).t();
  }
  return 0;
} // mvsermix_compute_posterior_comcov

#endif // ifndef _MASH_H
ceil_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 * Update: hhchen@openailab.com
 */

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>

/* Elementwise ceil for fp32 tensors: out[i] = ceilf(in[i]).
 *
 * BUGFIX: the original wrote `input_data[i] = ceilf(out_data[i])` in the
 * rank<4 path, i.e. it read the still-uninitialized OUTPUT buffer and
 * clobbered the INPUT. The assignment direction is now correct.
 *
 * Returns 0 on success, -1 for unsupported tensor ranks (> 4).
 */
int ref_ceil_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    /* dims size = 2 or 3 */
    if (input_tensor->dim_num < 4)
    {
        float* input_data = input_tensor->data;
        float* out_data = output_tensor->data;
        int total_size = input_tensor->elem_num;

        for (int i = 0; i < total_size; i++)
        {
            /* BUGFIX: was input_data[i] = ceilf(out_data[i]); */
            out_data[i] = ceilf(input_data[i]);
        }

        return 0;
    }
    /* dims size = 4 (NCHW); split per channel so OpenMP can parallelize.
     * NOTE(review): dims[0] (batch) is not iterated, matching the original —
     * presumably batch is always 1 here; confirm against the op's callers. */
    else if (input_tensor->dim_num == 4)
    {
        int w = input_tensor->dims[3];
        int h = input_tensor->dims[2]; /* was output dims[2]; shapes match, read input consistently */
        int channels = input_tensor->dims[1];
        int size = h * w;
        int c_step = h * w;

        float* input_data = input_tensor->data;
        float* out_data = output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* src = input_data + c_step * q;
            float* dst = out_data + c_step * q;

            for (int i = 0; i < size; i++)
            {
                dst[i] = ceilf(src[i]);
            }
        }

        return 0;
    }

    return -1;
}

/* Elementwise ceil for uint8 (asymmetric-quantized) tensors:
 * dequantize -> ceil in float -> requantize with saturation to [0, 255].
 *
 * BUGFIXES vs. the original:
 *   - rank<4 path wrote `input_data[i] = ceil(out_data[i])`, reading the
 *     uninitialized scratch output buffer; direction corrected.
 *   - ceilf() is used instead of ceil()/round() to avoid float->double churn.
 *   - sys_malloc() results are checked.
 *   - unsupported ranks (> 4) now fail with -1 instead of quantizing an
 *     uninitialized buffer.
 *
 * Returns 0 on success, -1 on allocation failure or unsupported rank.
 */
int ref_ceil_uint8(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    /* dequant */
    uint8_t* input_uint8 = input_tensor->data;
    uint8_t* output_uint8 = output_tensor->data;
    float input_scale = input_tensor->scale;
    float output_scale = output_tensor->scale;
    int32_t input_zero = input_tensor->zero_point;
    int32_t output_zero = output_tensor->zero_point;
    int input_size = input_tensor->elem_num;
    int output_size = output_tensor->elem_num;

    float* input_data = (float*)sys_malloc(input_size * sizeof(float));
    float* out_data = (float*)sys_malloc(output_size * sizeof(float));
    if (input_data == NULL || out_data == NULL)
    {
        if (input_data)
            sys_free(input_data);
        if (out_data)
            sys_free(out_data);
        return -1;
    }

    for (int i = 0; i < input_size; i++)
    {
        input_data[i] = ((float)input_uint8[i] - (float)input_zero) * input_scale;
    }

    /* dims size = 2 or 3 */
    if (input_tensor->dim_num < 4)
    {
        int total_size = input_tensor->elem_num;

        for (int i = 0; i < total_size; i++)
        {
            /* BUGFIX: was input_data[i] = ceil(out_data[i]); */
            out_data[i] = ceilf(input_data[i]);
        }
    }
    /* dims size = 4 (NCHW) */
    else if (input_tensor->dim_num == 4)
    {
        int w = input_tensor->dims[3];
        int h = input_tensor->dims[2];
        int channels = input_tensor->dims[1];
        int size = h * w;
        int c_step = h * w;

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* src = input_data + c_step * q;
            float* dst = out_data + c_step * q;

            for (int i = 0; i < size; i++)
            {
                dst[i] = ceilf(src[i]);
            }
        }
    }
    else
    {
        /* Unsupported rank: do not requantize an uninitialized buffer. */
        sys_free(input_data);
        sys_free(out_data);
        return -1;
    }

    /* quant: round to nearest, saturate to the uint8 range */
    for (int i = 0; i < output_size; i++)
    {
        int udata = (int)roundf(out_data[i] / output_scale + (float)output_zero);
        if (udata > 255)
            udata = 255;
        else if (udata < 0)
            udata = 0;
        output_uint8[i] = udata;
    }

    sys_free(input_data);
    sys_free(out_data);

    return 0;
}

/* No per-node state to set up for this reference op. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* No per-node state to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Nothing to prepare before the first run. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Dispatch to the fp32 or uint8 kernel based on the input tensor's dtype. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int ret = -1;
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_ceil_fp32(input_tensor, output_tensor, exec_graph->num_thread);
    else if (input_tensor->data_type == TENGINE_DT_UINT8)
        ret = ref_ceil_uint8(input_tensor, output_tensor, exec_graph->num_thread);
    else
        TLOG_ERR("Input data type %d not to be supported.\n", input_tensor->data_type);

    return ret;
}

/* Reference implementation: always a usable fallback, never preferred. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_CANDO;
}

static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

int register_ceil_ref_op(void* arg)
{
    return register_builtin_node_ops(OP_CEIL, &hcl_node_ops);
}

int unregister_ceil_ref_op(void* arg)
{
    return unregister_builtin_node_ops(OP_CEIL, &hcl_node_ops);
}
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods takes a 24 bit image and reduces % the number of colors so it can be displayed on raster device with less % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes a standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. 
RGB space is a 3-dimensional % vector space, and each pixel, Pi, is defined by an ordered triple of % red, green, and blue coordinates, (Ri, Gi, Bi). % % Each primary color component (red, green, or blue) represents an % intensity which varies linearly from 0 to a maximum value, Cmax, which % corresponds to full saturation of that color. Color allocation is % defined over a domain consisting of the cube in RGB space with opposite % vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax = % 255. % % The algorithm maps this domain onto a tree in which each node % represents a cube within that domain. In the following discussion % these cubes are defined by the coordinate of two opposite vertices (vertex % nearest the origin in RGB space and the vertex farthest from the origin). % % The tree's root node represents the entire domain, (0,0,0) through % (Cmax,Cmax,Cmax). Each lower level in the tree is generated by % subdividing one node's cube into eight smaller cubes of equal size. % This corresponds to bisecting the parent cube with planes passing % through the midpoints of each edge. % % The basic algorithm operates in three phases: Classification, % Reduction, and Assignment. Classification builds a color description % tree for the image. Reduction collapses the tree until the number it % represents, at most, the number of colors desired in the output image. % Assignment defines the output image's color map and sets each pixel's % color by restorage_class in the reduced tree. Our goal is to minimize % the numerical discrepancies between the original colors and quantized % colors (quantization error). % % Classification begins by initializing a color description tree of % sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color description % tree in the storage_class phase for realistic values of Cmax. 
If % colors components in the input image are quantized to k-bit precision, % so that Cmax= 2k-1, the tree would need k levels below the root node to % allow representing each possible input color in a leaf. This becomes % prohibitive because the tree's total number of nodes is 1 + % sum(i=1, k, 8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each % such node: % % n1: Number of pixels whose color is contained in the RGB cube which % this node represents; % % n2: Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb: Sums of the red, green, and blue component values for all % pixels not classified at a lower depth. The combination of these sums % and n2 will ultimately characterize the mean color of a set of pixels % represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the % quantization error for a node. % % Reduction repeatedly prunes the tree until the number of nodes with n2 % > 0 is less than or equal to the maximum number of colors allowed in % the output image. On any given iteration over the tree, it selects % those nodes whose E count is minimal for pruning and merges their color % statistics upward. 
It uses a pruning threshold, Ep, to govern node % selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors within % the cubic volume which the node represents. This includes n1 - n2 % pixels whose colors should be defined by nodes at a lower level in the % tree. % % Assignment generates the output image from the pruned tree. The output % image consists of two parts: (1) A color map, which is an array of % color descriptions (RGB triples) for each color present in the output % image; (2) A pixel array, which represents each pixel as an index % into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. 
% % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % This method is based on a similar algorithm written by Paul Raveling. % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/compare.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE) #define CacheShift 2 #else #define CacheShift 3 #endif #define ErrorQueueLength 16 #define ErrorRelativeWeight PerceptibleReciprocal(16) #define MaxNodes 266817 #define MaxTreeDepth 8 #define NodesInAList 1920 /* Typdef declarations. 
*/ typedef struct _DoublePixelPacket { double red, green, blue, alpha; } DoublePixelPacket; typedef struct _NodeInfo { struct _NodeInfo *parent, *child[16]; MagickSizeType number_unique; DoublePixelPacket total_color; double quantize_error; size_t color_number, id, level; } NodeInfo; typedef struct _Nodes { NodeInfo *nodes; struct _Nodes *next; } Nodes; typedef struct _CubeInfo { NodeInfo *root; size_t colors, maximum_colors; ssize_t transparent_index; MagickSizeType transparent_pixels; DoublePixelPacket target; double distance, pruning_threshold, next_threshold; size_t nodes, free_nodes, color_number; NodeInfo *next_node; Nodes *node_queue; MemoryInfo *memory_info; ssize_t *cache; DoublePixelPacket error[ErrorQueueLength]; double diffusion, weights[ErrorQueueLength]; QuantizeInfo *quantize_info; MagickBooleanType associate_alpha; ssize_t x, y; size_t depth; MagickOffsetType offset; MagickSizeType span; } CubeInfo; /* Method prototypes. */ static CubeInfo *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t); static NodeInfo *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *); static MagickBooleanType AssignImageColors(Image *,CubeInfo *,ExceptionInfo *), ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *), DitherImage(Image *,CubeInfo *,ExceptionInfo *), SetGrayscaleImage(Image *,ExceptionInfo *), SetImageColormap(Image *,CubeInfo *,ExceptionInfo *); static void ClosestColor(const Image *,CubeInfo *,const NodeInfo *), DefineImageColormap(Image *,CubeInfo *,NodeInfo *), DestroyCubeInfo(CubeInfo *), PruneLevel(CubeInfo *,const NodeInfo *), PruneToCubeDepth(CubeInfo *,const NodeInfo *), ReduceImageColors(const Image *,CubeInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireQuantizeInfo() allocates the QuantizeInfo structure. 
%
%  The format of the AcquireQuantizeInfo method is:
%
%      QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  QuantizeInfo
    *info;

  /*
    Allocate and initialize with library defaults.
  */
  info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*info));
  GetQuantizeInfo(info);
  if (image_info != (ImageInfo *) NULL)
    {
      const char
        *option;

      /*
        Derive dither settings and error metering from the image info.
      */
      if (image_info->dither == MagickFalse)
        info->dither_method=NoDitherMethod;
      else
        info->dither_method=RiemersmaDitherMethod;
      option=GetImageOption(image_info,"dither");
      if (option != (const char *) NULL)
        info->dither_method=(DitherMethod) ParseCommandOption(
          MagickDitherOptions,MagickFalse,option);
      info->measure_error=image_info->verbose;
    }
  return(info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A s s i g n I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AssignImageColors() generates the output image from the pruned tree.  The
%  output image consists of two parts: (1) A color map, which is an array
%  of color descriptions (RGB triples) for each color present in the
%  output image; (2) A pixel array, which represents each pixel as an
%  index into the color map array.
%
%  First, the assignment phase makes one pass over the pruned color
%  description tree to establish the image's color map.  For each node
%  with n2 > 0, it divides Sr, Sg, and Sb by n2 .  This produces the mean
%  color of all pixels that classify no lower than this node.  Each of
%  these colors becomes an entry in the color map.
%
%  Finally, the assignment phase reclassifies each pixel in the pruned
%  tree to identify the deepest node containing the pixel's color.  The
%  pixel's value in the pixel array becomes the index of this node's mean
%  color in the color map.
%
%  The format of the AssignImageColors() method is:
%
%      MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/

/*
  Load a pixel into a DoublePixelPacket, premultiplying RGB by alpha when the
  cube was built with associated alpha (skipped for opaque pixels).
*/
static inline void AssociateAlphaPixel(const Image *image,
  const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    alpha;

  if ((cube_info->associate_alpha == MagickFalse) ||
      (GetPixelAlpha(image,pixel) == OpaqueAlpha))
    {
      /* No association: copy channels through unchanged. */
      alpha_pixel->red=(double) GetPixelRed(image,pixel);
      alpha_pixel->green=(double) GetPixelGreen(image,pixel);
      alpha_pixel->blue=(double) GetPixelBlue(image,pixel);
      alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
      return;
    }
  alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel));
  alpha_pixel->red=alpha*GetPixelRed(image,pixel);
  alpha_pixel->green=alpha*GetPixelGreen(image,pixel);
  alpha_pixel->blue=alpha*GetPixelBlue(image,pixel);
  alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}

/*
  Same association as AssociateAlphaPixel(), but for a PixelInfo source.
*/
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
  const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    alpha;

  if ((cube_info->associate_alpha == MagickFalse) ||
      (pixel->alpha == OpaqueAlpha))
    {
      alpha_pixel->red=(double) pixel->red;
      alpha_pixel->green=(double) pixel->green;
      alpha_pixel->blue=(double) pixel->blue;
      alpha_pixel->alpha=(double) pixel->alpha;
      return;
    }
  alpha=(double) (QuantumScale*pixel->alpha);
  alpha_pixel->red=alpha*pixel->red;
  alpha_pixel->green=alpha*pixel->green;
  alpha_pixel->blue=alpha*pixel->blue;
  alpha_pixel->alpha=(double) pixel->alpha;
}

/*
  Map a color to the child index (0..7, or 0..15 with alpha) at tree level
  `index`: one bit per channel, taken from bit `index` of the 8-bit value.
*/
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    id;

  id=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01) |
    ((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1 |
    ((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3;
  return(id);
}

/*
  Assignment phase: build the colormap from the pruned tree, then remap every
  pixel to its closest colormap entry (dithered or straight), finishing with
  optional error measurement and a monochrome colormap normalization.
*/
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define AssignImageTag  "Assign/Image"

  ColorspaceType
    colorspace;

  ssize_t
    y;

  /*
    Allocate image colormap.
  */
  colorspace=image->colorspace;
  if (cube_info->quantize_info->colorspace != UndefinedColorspace)
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
      exception);
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  if (SetImageColormap(image,cube_info,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Create a reduced color image.
  */
  if (cube_info->quantize_info->dither_method != NoDitherMethod)
    (void) DitherImage(image,cube_info,exception);
  else
    {
      CacheView
        *image_view;

      MagickBooleanType
        status;

      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        /* Each thread works on a private copy of the cube so the
           closest-color search scratch fields do not race. */
        CubeInfo
          cube;

        Quantum
          *magick_restrict q;

        ssize_t
          count,
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;

          const NodeInfo
            *node_info;

          ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.
          */
          /* Run-length optimization: classify once for a run of identical
             neighboring pixels. */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
          {
            PixelInfo
              packet;

            GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
            if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
              break;
          }
          AssociateAlphaPixel(image,&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(image,(Quantum) index,q);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRed(image,ClampToQuantum(
                  image->colormap[index].red),q);
                SetPixelGreen(image,ClampToQuantum(
                  image->colormap[index].green),q);
                SetPixelBlue(image,ClampToQuantum(
                  image->colormap[index].blue),q);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelAlpha(image,ClampToQuantum(
                    image->colormap[index].alpha),q);
              }
            q+=GetPixelChannels(image);
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image,exception);
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    {
      double
        intensity;

      /*
        Monochrome image.
      */
      /* Force the two-entry colormap to pure black/white, darker color first
         by luma. */
      intensity=GetPixelInfoLuma(image->colormap+0) < QuantumRange/2.0 ? 0.0 :
        QuantumRange;
      if (image->colors > 1)
        {
          intensity=0.0;
          if (GetPixelInfoLuma(image->colormap+0) >
              GetPixelInfoLuma(image->colormap+1))
            intensity=(double) QuantumRange;
        }
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image,exception);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (IssRGBCompatibleColorspace(colorspace) == MagickFalse))
    (void) TransformImageColorspace(image,colorspace,exception);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l a s s i f y I m a g e C o l o r s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClassifyImageColors() begins by initializing a color description tree
%  of sufficient depth to represent each possible input color in a leaf.
%  However, it is impractical to generate a fully-formed color
%  description tree in the storage_class phase for realistic values of
%  Cmax.  If colors components in the input image are quantized to k-bit
%  precision, so that Cmax= 2k-1, the tree would need k levels below the
%  root node to allow representing each possible input color in a leaf.
%  This becomes prohibitive because the tree's total number of nodes is
%  1 + sum(i=1,k,8k).
%
%  A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
%  Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
%  Initializes data structures for nodes only as they are needed;  (2)
%  Chooses a maximum depth for the tree as a function of the desired
%  number of colors in the output image (currently log2(colormap size)).
%
%  For each pixel in the input image, storage_class scans downward from
%  the root of the color description tree.  At each level of the tree it
%  identifies the single node which represents a cube in RGB space
%  containing the pixel's color.  It updates the following data for each
%  such node:
%
%    n1: Number of pixels whose color is contained in the RGB cube which
%    this node represents;
%
%    n2: Number of pixels whose color is not represented in a node at
%    lower depth in the tree; initially, n2 = 0 for all nodes except
%    leaves of the tree.
%
%    Sr, Sg, Sb: Sums of the red, green, and blue component values for all
%    pixels not classified at a lower depth.  The combination of these sums
%    and n2 will ultimately characterize the mean color of a set of pixels
%    represented by this node.
%
%    E: the distance squared in RGB space between each pixel contained
%    within a node and the nodes' center.  This represents the quantization
%    error for a node.
%
%  The format of the ClassifyImageColors() method is:
%
%      MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
%        const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o image: the image.
%
*/

/*
  Decide whether the alpha channel participates in classification: alpha is
  associated only when the image carries an alpha trait, and never for the
  two-color gray (monochrome) special case.
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    associate_alpha;

  associate_alpha=image->alpha_trait != UndefinedPixelTrait ? MagickTrue :
    MagickFalse;
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    associate_alpha=MagickFalse;
  cube_info->associate_alpha=associate_alpha;
}

static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag  "Classify/Image"

  CacheView
    *image_view;

  double
    bisect;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  /*
    Transform the image into the quantization colorspace (or sRGB when the
    requested colorspace is undefined/CMYK and the image is not already
    sRGB-compatible).  NOTE(review): casts away const to transform in place.
  */
  if (cube_info->quantize_info->colorspace != image->colorspace)
    {
      if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
          (cube_info->quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace((Image *) image,
          cube_info->quantize_info->colorspace,exception);
      else
        if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
          (void) TransformImageColorspace((Image *) image,sRGBColorspace,
            exception);
    }
  /*
    The first tree node's cube is centered on the middle of the quantum range.
  */
  midpoint.red=(double) QuantumRange/2.0;
  midpoint.green=(double) QuantumRange/2.0;
  midpoint.blue=(double) QuantumRange/2.0;
  midpoint.alpha=(double) QuantumRange/2.0;
  error.alpha=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      /*
        Run-length: advance over consecutive identical pixels so the whole
        run is classified once with weight `count'.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      /*
        First pass classifies to the full MaxTreeDepth; `mid' tracks the
        center of the shrinking octree cell at each level.
      */
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != 0)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    /*
      Too many leaves: prune to the current cube depth and fall through to
      the shallower second pass below.
    */
    if (cube_info->colors > cube_info->maximum_colors)
      {
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Second pass: continue from the row that triggered pruning, classifying
    only to cube_info->depth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","%s",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != 0)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  /*
    Restore an sRGB working colorspace when classification transformed it.
  */
  if (cube_info->quantize_info->colorspace != image->colorspace)
    if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
        (cube_info->quantize_info->colorspace != CMYKColorspace))
      (void) TransformImageColorspace((Image *) image,sRGBColorspace,
        exception);
  /* y reaches image->rows only if no row fetch/progress callback aborted. */
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e Q u a n t i z e I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
%  or if quantize info is NULL, a new one.
%
%  The format of the CloneQuantizeInfo method is:
%
%      QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
%      quantize info, or if image info is NULL a new one.
%
%    o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  /* AcquireCriticalMemory() never returns NULL; it aborts on failure. */
  clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetQuantizeInfo(clone_info);
  if (quantize_info == (QuantizeInfo *) NULL)
    return(clone_info);
  clone_info->number_colors=quantize_info->number_colors;
  clone_info->tree_depth=quantize_info->tree_depth;
  clone_info->dither_method=quantize_info->dither_method;
  clone_info->colorspace=quantize_info->colorspace;
  clone_info->measure_error=quantize_info->measure_error;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o s e s t C o l o r                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClosestColor() traverses the color cube tree at a particular node and
%  determines which colormap entry best represents the input color.
%
%  The format of the ClosestColor method is:
%
%      void ClosestColor(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  /* Only nodes that define a colormap entry (unique pixels) are candidates. */
  if (node_info->number_unique != 0)
    {
      double
        alpha,
        beta,
        distance,
        pixel;

      DoublePixelPacket
        *magick_restrict q;

      PixelInfo
        *magick_restrict p;

      /*
        Determine if this color is "closest".  The per-channel distance is
        accumulated with early exits once it exceeds the best distance found
        so far (cube_info->distance).
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      /* When alpha is associated, weight channels by their alpha values. */
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          alpha=(MagickRealType) (QuantumScale*p->alpha);
          beta=(MagickRealType) (QuantumScale*q->alpha);
        }
      pixel=alpha*p->red-beta*q->red;
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*p->green-beta*q->green;
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*p->blue-beta*q->blue;
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  if (cube_info->associate_alpha != MagickFalse)
                    {
                      pixel=p->alpha-q->alpha;
                      distance+=pixel*pixel;
                    }
                  if (distance <= cube_info->distance)
                    {
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o m p r e s s I m a g e C o l o r m a p                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CompressImageColormap() compresses an image colormap by removing any
%  duplicate or unused color entries.
%
%  The format of the CompressImageColormap method is:
%
%      MagickBooleanType CompressImageColormap(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);
  /*
    Re-quantize with the current number of colors; duplicate/unused entries
    collapse as a side effect of rebuilding the colormap.
  */
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e I m a g e C o l o r m a p                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineImageColormap() traverses the color cube tree and notes each colormap
%  entry.  A colormap entry is any node in the color cube tree where the
%  of unique colors is not zero.
%
%  The format of the DefineImageColormap method is:
%
%      void DefineImageColormap(Image *image,CubeInfo *cube_info,
%        NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static void DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        alpha;

      PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube:
        total_color (a QuantumScale-normalized sum) divided by the number of
        unique pixels, via alpha = 1/number_unique.
      */
      q=image->colormap+image->colors;
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              /*
                Un-premultiply color by the mean alpha (gamma = 1/alpha) and
                remember the most-transparent-heavy entry.
              */
              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y C u b e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyCubeInfo() deallocates memory associated with an image.
%
%  The format of the DestroyCubeInfo method is:
%
%      DestroyCubeInfo(CubeInfo *cube_info)
%
%  A description of each parameter follows:
%
%    o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  Nodes
    *nodes;

  /*
    Release color cube tree storage: walk the node-page queue, freeing each
    page's node array and then the page itself.
  */
  do
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  } while (cube_info->node_queue != (Nodes *) NULL);
  /* The dither cache is backed by virtual memory; release it if present. */
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y Q u a n t i z e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
%  structure.
%
%  The format of the DestroyQuantizeInfo method is:
%
%      QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  /* Invalidate the signature so a double-destroy trips the assert above. */
  quantize_info->signature=(~MagickCoreSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i t h e r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DitherImage() distributes the difference between an original image and
%  the corresponding color reduced algorithm to neighboring pixels using
%  serpentine-scan Floyd-Steinberg error diffusion.  DitherImage returns
%  MagickTrue if the image is dithered otherwise MagickFalse.
%
%  The format of the DitherImage method is:
%
%      MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o exception: return any errors or warnings in this structure.
% */ static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels) { ssize_t i; assert(pixels != (DoublePixelPacket **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (DoublePixelPacket *) NULL) pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]); pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels); return(pixels); } static DoublePixelPacket **AcquirePixelThreadSet(const size_t count) { DoublePixelPacket **pixels; size_t number_threads; ssize_t i; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (DoublePixelPacket **) NULL) return((DoublePixelPacket **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2* sizeof(**pixels)); if (pixels[i] == (DoublePixelPacket *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static inline ssize_t CacheOffset(CubeInfo *cube_info, const DoublePixelPacket *pixel) { #define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift))) #define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift))) #define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift))) #define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift))) ssize_t offset; offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) | GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) | BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue)))); if (cube_info->associate_alpha != MagickFalse) offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha))); return(offset); } static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; DoublePixelPacket **pixels; MagickBooleanType status; 
ssize_t y; /* Distribute quantization error using Floyd-Steinberg. */ pixels=AcquirePixelThreadSet(image->columns); if (pixels == (DoublePixelPacket **) NULL) return(MagickFalse); status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); CubeInfo cube; DoublePixelPacket *current, *previous; Quantum *magick_restrict q; size_t index; ssize_t x, v; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } cube=(*cube_info); current=pixels[id]+(y & 0x01)*image->columns; previous=pixels[id]+((y+1) & 0x01)*image->columns; v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1); for (x=0; x < (ssize_t) image->columns; x++) { DoublePixelPacket color, pixel; ssize_t i; ssize_t u; u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x; AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel); if (x > 0) { pixel.red+=7.0*cube_info->diffusion*current[u-v].red/16; pixel.green+=7.0*cube_info->diffusion*current[u-v].green/16; pixel.blue+=7.0*cube_info->diffusion*current[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=7.0*cube_info->diffusion*current[u-v].alpha/16; } if (y > 0) { if (x < (ssize_t) (image->columns-1)) { pixel.red+=cube_info->diffusion*previous[u+v].red/16; pixel.green+=cube_info->diffusion*previous[u+v].green/16; pixel.blue+=cube_info->diffusion*previous[u+v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=cube_info->diffusion*previous[u+v].alpha/16; } pixel.red+=5.0*cube_info->diffusion*previous[u].red/16; pixel.green+=5.0*cube_info->diffusion*previous[u].green/16; pixel.blue+=5.0*cube_info->diffusion*previous[u].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=5.0*cube_info->diffusion*previous[u].alpha/16; if (x > 0) { pixel.red+=3.0*cube_info->diffusion*previous[u-v].red/16; 
pixel.green+=3.0*cube_info->diffusion*previous[u-v].green/16; pixel.blue+=3.0*cube_info->diffusion*previous[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=3.0*cube_info->diffusion*previous[u-v].alpha/16; } } pixel.red=(double) ClampPixel(pixel.red); pixel.green=(double) ClampPixel(pixel.green); pixel.blue=(double) ClampPixel(pixel.blue); if (cube.associate_alpha != MagickFalse) pixel.alpha=(double) ClampPixel(pixel.alpha); i=CacheOffset(&cube,&pixel); if (cube.cache[i] < 0) { NodeInfo *node_info; size_t node_id; /* Identify the deepest node containing the pixel's color. */ node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { node_id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[node_id] == (NodeInfo *) NULL) break; node_info=node_info->child[node_id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+ 1.0); ClosestColor(image,&cube,node_info->parent); cube.cache[i]=(ssize_t) cube.color_number; } /* Assign pixel to closest colormap entry. */ index=(size_t) cube.cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image)); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red), q+u*GetPixelChannels(image)); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green), q+u*GetPixelChannels(image)); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue), q+u*GetPixelChannels(image)); if (cube.associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha), q+u*GetPixelChannels(image)); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; /* Store the error. 
*/ AssociateAlphaPixelInfo(&cube,image->colormap+index,&color); current[u].red=pixel.red-color.red; current[u].green=pixel.green-color.green; current[u].blue=pixel.blue-color.blue; if (cube.associate_alpha != MagickFalse) current[u].alpha=pixel.alpha-color.alpha; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } image_view=DestroyCacheView(image_view); pixels=DestroyPixelThreadSet(pixels); return(MagickTrue); } static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view, CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CubeInfo *p; DoublePixelPacket color, pixel; MagickBooleanType proceed; size_t index; p=cube_info; if ((p->x >= 0) && (p->x < (ssize_t) image->columns) && (p->y >= 0) && (p->y < (ssize_t) image->rows)) { Quantum *magick_restrict q; ssize_t i; /* Distribute error. 
      */
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      AssociateAlphaPixel(image,cube_info,q,&pixel);
      /*
        Add the weighted error history (Hilbert-curve queue) to this pixel.
      */
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].red;
        pixel.green+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].green;
        pixel.blue+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.alpha+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
            p->error[i].alpha;
      }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          NodeInfo
            *node_info;

          size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) p->cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q);
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
              q);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue.
      */
      (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /* Step the Hilbert walk one pixel in the requested direction. */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}

/*
  Recursively trace a Hilbert curve of the given level over the image,
  dithering one pixel per RiemersmaDither() call; `direction' selects the
  curve orientation.
*/
static MagickBooleanType Riemersma(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const size_t level,const unsigned int direction,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=MagickTrue;
  if (level == 1)
    switch (direction)
    {
      case WestGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        break;
      }
      case EastGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        break;
      }
      case NorthGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        break;
      }
      case SouthGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        break;
      }
      default:
        break;
    }
  else
    /*
      Recursive case: the interleaving of sub-curves and single steps below
      encodes the Hilbert curve construction for each orientation — do not
      reorder.
    */
    switch (direction)
    {
      case WestGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
            exception);
        break;
      }
      case EastGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
            exception);
        break;
      }
      case NorthGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
            exception);
        break;
      }
      case SouthGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
            exception);
        break;
      }
      default:
        break;
    }
  return(status);
}

static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *artifact;

  MagickBooleanType
    status;

  size_t
    extent,
    level;

  /*
    Allow the caller to override the error-diffusion strength (0.0 - 1.0).
  */
  artifact=GetImageArtifact(image,"dither:diffusion-amount");
  if (artifact != (const char *) NULL)
    cube_info->diffusion=StringToDoubleInterval(artifact,1.0);
  /*
    Riemersma is the only method handled here; everything else falls through
    to classic Floyd-Steinberg error diffusion.
  */
  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info,exception));
  /*
    Distribute quantization error along a Hilbert curve.
  */
  (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  /*
    The Hilbert curve recursion depth is ceil(log2(max(columns,rows))) so the
    curve covers the entire image.
  */
  extent=MagickMax(image->columns,image->rows);
  level=(size_t) log2((double) extent);
  if ((1UL << level) < extent)
    level++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  status=MagickTrue;
  if (level > 0)
    status=Riemersma(image,image_view,cube_info,level,NorthGravity,exception);
  /*
    Final RiemersmaDither() call flushes the last queued pixel.
  */
  if (status != MagickFalse)
    status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t C u b e I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetCubeInfo() initializes the Cube data structure.
%
%  The format of the GetCubeInfo method is:
%
%      CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
%        const size_t depth,const size_t maximum_colors)
%
%  A description of each parameter follows.
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
%    o depth: Normally, this integer value is zero or one.  A zero or one
%      tells Quantize to choose an optimal tree depth of Log4(number_colors).
%      A tree of this depth generally allows the best representation of the
%      reference image with the least amount of memory and the fastest
%      computational speed.
      In some cases, such as an image with low color dispersion (a few
%      number of colors), a value other than Log4(number_colors) is required.
%      To expand the color tree completely, use a value of 8.
%
%    o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  double
    weight;

  size_t
    length;

  ssize_t
    i;

  /*
    Initialize tree to describe color cube_info.
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) memset(cube_info,0,sizeof(*cube_info));
  /*
    Clamp the requested depth to [2, MaxTreeDepth]; a tree shallower than 2
    cannot partition the color cube at all.
  */
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->root->parent=cube_info->root;  /* root is its own parent */
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither_method == NoDitherMethod)
    return(cube_info);  /* dither cache & weights only needed when dithering */
  /*
    Initialize dither resources.
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
  if (cube_info->memory_info == (MemoryInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
  /*
    Initialize color cache.  memset() with -1 sets every byte to 0xFF, which
    makes each ssize_t cache slot -1 (i.e. empty).
  */
  (void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
  /*
    Distribute weights along a curve of exponential decay.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]=PerceptibleReciprocal(weight);
    weight*=exp(log(1.0/ErrorRelativeWeight)/(ErrorQueueLength-1.0));
  }
  cube_info->diffusion=1.0;
  return(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t N o d e I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNodeInfo() allocates memory for a new node in the color cube tree and
%  presets all fields to zero.
%
%  The format of the GetNodeInfo method is:
%
%      NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
%        const size_t level,NodeInfo *parent)
%
%  A description of each parameter follows.
%
%    o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
%    o id: Specifies the child number of the node.
%
%    o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.  Nodes are carved in NodesInAList
        batches from a singly-linked list of arenas (cube_info->node_queue)
        so individual nodes never need to be freed one by one.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        return((NodeInfo *) NULL);
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;  /* bump-pointer allocation from arena */
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  G e t I m a g e Q u a n t i z e E r r o r                                  %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantizeError() measures the difference between the original
%  and quantized images.  This difference is the total quantization error.
%  The error is computed by summing over all pixels in an image the distance
%  squared in RGB space between each reference pixel value and its quantized
%  value.  These values are computed:
%
%    o mean_error_per_pixel:  This value is the mean error for any single
%      pixel in the image.
%
%    o normalized_mean_square_error:  This value is the normalized mean
%      quantization error for any single pixel in the image.  This distance
%      measure is normalized to a range between 0 and 1.  It is independent
%      of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error:  This value is the normalized
%      maximum quantization error for any single pixel in the image.  This
%      distance measure is normalized to a range between 0 and 1.  It is
%      independent of the range of red, green, and blue values in your image.
%
%  The format of the GetImageQuantizeError method is:
%
%      MagickBooleanType GetImageQuantizeError(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) memset(&image->error,0,sizeof(image->error));
  /*
    Error is only meaningful for colormapped (PseudoClass) images; a
    DirectClass image has no colormap to compare against.
  */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  area=3.0*image->columns*image->rows;  /* 3 channels per pixel */
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(ssize_t) GetPixelIndex(image,p);
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          /*
            Weight each channel by its pixel/colormap alpha so transparent
            pixels contribute less error.
          */
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  G e t Q u a n t i z e I n f o                                              %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetQuantizeInfo() initializes the QuantizeInfo structure.
%
%  The format of the GetQuantizeInfo method is:
%
%      GetQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  /*
    Zero the structure, then apply library defaults.
  */
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  K m e a n s I m a g e                                                      %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  KmeansImage() applies k-means color reduction to an image.  This is a
%  colorspace clustering or segmentation technique.
%
%  The format of the KmeansImage method is:
%
%      MagickBooleanType KmeansImage(Image *image,const size_t number_colors,
%        const size_t max_iterations,const double tolerance,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_colors: number of colors to use as seeds.
%
%    o max_iterations: maximum number of iterations while converging.
%
%    o tolerance: the maximum tolerance.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Per-thread accumulator for one cluster: channel sums, member count, and
  summed squared distance (distortion).
*/
typedef struct _KmeansInfo
{
  double
    red,
    green,
    blue,
    alpha,
    black,
    count,
    distortion;
} KmeansInfo;

/*
  Release one KmeansInfo array per worker thread, then the array of arrays.
  Safe to call on a partially-populated set (NULL entries are skipped).
*/
static KmeansInfo **DestroyKmeansThreadSet(KmeansInfo **kmeans_info)
{
  ssize_t
    i;

  assert(kmeans_info != (KmeansInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (kmeans_info[i] != (KmeansInfo *) NULL)
      kmeans_info[i]=(KmeansInfo *) RelinquishMagickMemory(kmeans_info[i]);
  kmeans_info=(KmeansInfo **) RelinquishMagickMemory(kmeans_info);
  return(kmeans_info);
}

/*
  Allocate one KmeansInfo array of number_colors entries per worker thread.
  Returns NULL on allocation failure (already-allocated entries are freed).
*/
static KmeansInfo **AcquireKmeansThreadSet(const size_t number_colors)
{
  KmeansInfo
    **kmeans_info;

  ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  kmeans_info=(KmeansInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*kmeans_info));
  if (kmeans_info == (KmeansInfo **) NULL)
    return((KmeansInfo **) NULL);
  (void) memset(kmeans_info,0,number_threads*sizeof(*kmeans_info));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    kmeans_info[i]=(KmeansInfo *) AcquireQuantumMemory(number_colors,
      sizeof(**kmeans_info));
    if (kmeans_info[i] == (KmeansInfo *) NULL)
      return(DestroyKmeansThreadSet(kmeans_info));
  }
  return(kmeans_info);
}

/*
  Squared distance between pixel p and colormap entry q, alpha-weighted and
  hue-aware for cyclic colorspaces.
*/
static inline double KmeansMetric(const Image *magick_restrict image,
  const Quantum *magick_restrict p,const PixelInfo *magick_restrict q)
{
  double
    gamma,
    metric,
    pixel;

  gamma=1.0;
  metric=0.0;
  if ((image->alpha_trait != UndefinedPixelTrait) ||
      (q->alpha_trait != UndefinedPixelTrait))
    {
      /*
        Missing alpha on either side is treated as fully opaque; gamma
        down-weights the color channels of transparent pixels.
      */
      pixel=GetPixelAlpha(image,p)-(q->alpha_trait != UndefinedPixelTrait ?
        q->alpha : OpaqueAlpha);
      metric+=pixel*pixel;
      if (image->alpha_trait != UndefinedPixelTrait)
        gamma*=QuantumScale*GetPixelAlpha(image,p);
      if (q->alpha_trait != UndefinedPixelTrait)
        gamma*=QuantumScale*q->alpha;
    }
  if (image->colorspace == CMYKColorspace)
    {
      pixel=QuantumScale*(GetPixelBlack(image,p)-q->black);
      metric+=gamma*pixel*pixel;
      gamma*=QuantumScale*(QuantumRange-GetPixelBlack(image,p));
      gamma*=QuantumScale*(QuantumRange-q->black);
    }
  metric*=3.0;
  pixel=QuantumScale*(GetPixelRed(image,p)-q->red);
  if (IsHueCompatibleColorspace(image->colorspace) != MagickFalse)
    {
      /*
        Hue is cyclic: wrap differences greater than half the circle.
      */
      if (fabs((double) pixel) > 0.5)
        pixel-=0.5;
      pixel*=2.0;
    }
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelGreen(image,p)-q->green);
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelBlue(image,p)-q->blue);
  metric+=gamma*pixel*pixel;
  return(metric);
}

MagickExport MagickBooleanType KmeansImage(Image *image,
  const size_t number_colors,const size_t max_iterations,const double tolerance,
  ExceptionInfo *exception)
{
#define KmeansImageTag  "Kmeans/Image"
#define RandomColorComponent(info)  (QuantumRange*GetPseudoRandomValue(info))

  CacheView
    *image_view;

  const char
    *colors;

  double
    previous_tolerance;

  KmeansInfo
    **kmeans_pixels;

  MagickBooleanType
    verbose,
    status;

  ssize_t
    n;

  size_t
    number_threads;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colors=GetImageArtifact(image,"kmeans:seed-colors");
  if (colors == (const char *) NULL)
    {
      CubeInfo
        *cube_info;

      QuantizeInfo
        *quantize_info;

      size_t
        colors,
        depth;

      /*
        Seed clusters from color quantization.
      */
      quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
      quantize_info->colorspace=image->colorspace;
      quantize_info->number_colors=number_colors;
      quantize_info->dither_method=NoDitherMethod;
      colors=number_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;  /* depth = Log4(number_colors)+1 */
      cube_info=GetCubeInfo(quantize_info,depth,number_colors);
      if (cube_info == (CubeInfo *) NULL)
        {
          quantize_info=DestroyQuantizeInfo(quantize_info);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      status=ClassifyImageColors(cube_info,image,exception);
      if (status != MagickFalse)
        {
          if (cube_info->colors > cube_info->maximum_colors)
            ReduceImageColors(image,cube_info);
          status=SetImageColormap(image,cube_info,exception);
        }
      DestroyCubeInfo(cube_info);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      if (status == MagickFalse)
        return(status);
    }
  else
    {
      char
        color[MagickPathExtent];

      const char
        *p;

      /*
        Seed clusters from color list (e.g. red;green;blue).
      */
      status=AcquireImageColormap(image,number_colors,exception);
      if (status == MagickFalse)
        return(status);
      for (n=0, p=colors; n < (ssize_t) image->colors; n++)
      {
        const char
          *q;

        for (q=p; *q != '\0'; q++)
          if (*q == ';')
            break;
        (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
          MagickPathExtent));
        (void) QueryColorCompliance(color,AllCompliance,image->colormap+n,
          exception);
        if (*q == '\0')
          {
            n++;
            break;
          }
        p=q+1;
      }
      if (n < (ssize_t) image->colors)
        {
          RandomInfo
            *random_info;

          /*
            Seed clusters from random values.
          */
          random_info=AcquireRandomInfo();
          for ( ; n < (ssize_t) image->colors; n++)
          {
            (void) QueryColorCompliance("#000",AllCompliance,image->colormap+n,
              exception);
            image->colormap[n].red=RandomColorComponent(random_info);
            image->colormap[n].green=RandomColorComponent(random_info);
            image->colormap[n].blue=RandomColorComponent(random_info);
            if (image->alpha_trait != UndefinedPixelTrait)
              image->colormap[n].alpha=RandomColorComponent(random_info);
            if (image->colorspace == CMYKColorspace)
              image->colormap[n].black=RandomColorComponent(random_info);
          }
          random_info=DestroyRandomInfo(random_info);
        }
    }
  /*
    Iterative refinement.
  */
  kmeans_pixels=AcquireKmeansThreadSet(number_colors);
  if (kmeans_pixels == (KmeansInfo **) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  previous_tolerance=0.0;
  verbose=IsStringTrue(GetImageArtifact(image,"debug"));
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (n=0; n < (ssize_t) max_iterations; n++)
  {
    double
      distortion;

    ssize_t
      i;

    ssize_t
      y;

    /*
      Reset per-thread accumulators for this iteration.
    */
    for (i=0; i < (ssize_t) number_threads; i++)
      (void) memset(kmeans_pixels[i],0,image->colors*sizeof(*kmeans_pixels[i]));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      Quantum
        *magick_restrict q;

      ssize_t
        x;

      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        double
          min_distance;

        ssize_t
          i;

        ssize_t
          j;

        /*
          Assign each pixel whose mean has the least squared color distance.
        */
        j=0;
        min_distance=KmeansMetric(image,q,image->colormap+0);
        for (i=1; i < (ssize_t) image->colors; i++)
        {
          double
            distance;

          if (min_distance <= MagickEpsilon)
            break;  /* exact match; no closer cluster possible */
          distance=KmeansMetric(image,q,image->colormap+i);
          if (distance < min_distance)
            {
              min_distance=distance;
              j=i;
            }
        }
        kmeans_pixels[id][j].red+=QuantumScale*GetPixelRed(image,q);
        kmeans_pixels[id][j].green+=QuantumScale*GetPixelGreen(image,q);
        kmeans_pixels[id][j].blue+=QuantumScale*GetPixelBlue(image,q);
        if (image->alpha_trait != UndefinedPixelTrait)
          kmeans_pixels[id][j].alpha+=QuantumScale*GetPixelAlpha(image,q);
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[id][j].black+=QuantumScale*GetPixelBlack(image,q);
        kmeans_pixels[id][j].count++;
        kmeans_pixels[id][j].distortion+=min_distance;
        SetPixelIndex(image,(Quantum) j,q);
        q+=GetPixelChannels(image);
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
    }
    if (status == MagickFalse)
      break;
    /*
      Reduce sums to [0] entry.
    */
    for (i=1; i < (ssize_t) number_threads; i++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) image->colors; j++)
      {
        kmeans_pixels[0][j].red+=kmeans_pixels[i][j].red;
        kmeans_pixels[0][j].green+=kmeans_pixels[i][j].green;
        kmeans_pixels[0][j].blue+=kmeans_pixels[i][j].blue;
        if (image->alpha_trait != UndefinedPixelTrait)
          kmeans_pixels[0][j].alpha+=kmeans_pixels[i][j].alpha;
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[0][j].black+=kmeans_pixels[i][j].black;
        kmeans_pixels[0][j].count+=kmeans_pixels[i][j].count;
        kmeans_pixels[0][j].distortion+=kmeans_pixels[i][j].distortion;
      }
    }
    /*
      Calculate the new means (centroids) of the pixels in the new clusters.
    */
    distortion=0.0;
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      double
        gamma;

      /*
        PerceptibleReciprocal() guards against empty clusters (count == 0).
      */
      gamma=PerceptibleReciprocal((double) kmeans_pixels[0][i].count);
      image->colormap[i].red=gamma*QuantumRange*kmeans_pixels[0][i].red;
      image->colormap[i].green=gamma*QuantumRange*kmeans_pixels[0][i].green;
      image->colormap[i].blue=gamma*QuantumRange*kmeans_pixels[0][i].blue;
      if (image->alpha_trait != UndefinedPixelTrait)
        image->colormap[i].alpha=gamma*QuantumRange*kmeans_pixels[0][i].alpha;
      if (image->colorspace == CMYKColorspace)
        image->colormap[i].black=gamma*QuantumRange*kmeans_pixels[0][i].black;
      distortion+=kmeans_pixels[0][i].distortion;
    }
    if (verbose != MagickFalse)
      (void) FormatLocaleFile(stderr,"distortion[%.20g]: %*g %*g\n",(double) n,
        GetMagickPrecision(),distortion,GetMagickPrecision(),
        fabs(distortion-previous_tolerance));
    if (fabs(distortion-previous_tolerance) <= tolerance)
      break;  /* converged */
    previous_tolerance=distortion;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,KmeansImageTag,(MagickOffsetType) n,
          max_iterations);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  kmeans_pixels=DestroyKmeansThreadSet(kmeans_pixels);
  if (image->progress_monitor != (MagickProgressMonitor) NULL)
    (void) SetImageProgress(image,KmeansImageTag,(MagickOffsetType)
      max_iterations-1,max_iterations);
  if (status == MagickFalse)
    return(status);
  return(SyncImage(image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  P o s t e r i z e I m a g e                                                %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PosterizeImage() reduces the image to a limited number of colors for a
%  "poster" effect.
%
%  The format of the PosterizeImage method is:
%
%      MagickBooleanType PosterizeImage(Image *image,const size_t levels,
%        const DitherMethod dither_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: Specifies a pointer to an Image structure.
%
%    o levels: Number of color levels allowed in each channel.  Very low values
%      (2, 3, or 4) have the most visible effect.
%
%    o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
%      RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer.
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag  "Posterize/Image"
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
  MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) PosterizePixel(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double)
          PosterizePixel(image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double)
          PosterizePixel(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double)
          PosterizePixel(image->colormap[i].alpha);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Follow posterization with a quantize pass to the reduced level count.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither_method=dither_method;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e C h i l d                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneChild() deletes the given node and merges its statistics into its
%  parent.
%
%  The format of the PruneSubtree method is:
%
%      PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.  A node has 8 children without alpha, 16 with.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[i]);
  /*
    Merge color statistics into parent.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.alpha+=node_info->total_color.alpha;
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e L e v e l                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneLevel() deletes all nodes at the bottom level of the color tree merging
%  their color statistics into their parent node.
%
%  The format of the PruneLevel method is:
%
%      PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[i]);
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e T o C u b e D e p t h                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneToCubeDepth() deletes any nodes at a depth greater than
%  cube_info->depth while merging their color statistics into their parent
%  node.
%
%  The format of the PruneToCubeDepth method is:
%
%      PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[i]);
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  Q u a n t i z e I m a g e                                                  %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImage() analyzes the colors within a reference image and chooses a
%  fixed number of colors to represent the image.  The goal of the algorithm
%  is to minimize the color difference between the input and output image while
%  minimizing the processing time.
%
%  The format of the QuantizeImage method is:
%
%      MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  ImageType
    type;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Clamp the requested color count to (0, MaxColormapSize].
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  type=IdentifyImageType(image,exception);
  if (IsGrayImageType(type) != MagickFalse)
    (void) SetGrayscaleImage(image,exception);
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;  /* shallower tree works better with dithering */
      if ((image->alpha_trait != UndefinedPixelTrait) && (depth > 5))
        depth--;  /* alpha doubles children per node; limit memory */
      if (IsGrayImageType(type) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  Q u a n t i z e I m a g e s                                                %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImages() analyzes the colors within a set of reference images and
%  chooses a fixed number of colors to represent the set.  The goal of the
%  algorithm is to minimize the color difference between the input and output
%  images while minimizing the processing time.
%
%  The format of the QuantizeImages method is:
%
%      MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
%        Image *images,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: Specifies a pointer to a list of Image structures.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  size_t
    depth,
    maximum_colors,
    number_images;

  ssize_t
    i;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images,exception);
      return(status);
    }
  status=MagickFalse;
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if (quantize_info->dither_method != NoDitherMethod)
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  number_images=GetImageListLength(images);
  image=images;
  /*
    Pass 1: classify the colors of every image into the shared cube.  The
    per-image progress monitor is suspended so only the list-level progress
    is reported.
  */
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Pass 2: reduce the shared cube, then assign the common colormap to
        every image in the sequence.
      */
      ReduceImageColors(images,cube_info);
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u a n t i z e E r r o r F l a t t e n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeErrorFlatten() traverses the color cube and flattens the
%  quantization error into a sorted 1D array.  This accelerates the color
%  reduction process.
%
%  Contributed by Yoya.
%
%  The format of the QuantizeErrorFlatten method is:
%
%      size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
%        const NodeInfo *node_info,const ssize_t offset,
%        double *quantize_error)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is current pointer.
%
%    o offset: quantize error offset.
%
%    o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
  const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
  size_t
    n,
    number_children;

  ssize_t
    i;

  if (offset >= (ssize_t) cube_info->nodes)
    return(0);
  quantize_error[offset]=node_info->quantize_error;
  n=1;
  number_children=cube_info->associate_alpha == MagickFalse ?
8UL : 16UL; for (i=0; i < (ssize_t) number_children ; i++) if (node_info->child[i] != (NodeInfo *) NULL) n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n, quantize_error); return(n); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Reduce() traverses the color cube tree and prunes any node whose % quantization error falls below a particular threshold. % % The format of the Reduce method is: % % Reduce(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info) { size_t number_children; ssize_t i; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) Reduce(cube_info,node_info->child[i]); if (node_info->quantize_error <= cube_info->pruning_threshold) PruneChild(cube_info,node_info); else { /* Find minimum pruning threshold. */ if (node_info->number_unique > 0) cube_info->colors++; if (node_info->quantize_error < cube_info->next_threshold) cube_info->next_threshold=node_info->quantize_error; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReduceImageColors() repeatedly prunes the tree until the number of nodes % with n2 > 0 is less than or equal to the maximum number of colors allowed % in the output image. On any given iteration over the tree, it selects % those nodes whose E value is minimal for pruning and merges their % color statistics upward. 
It uses a pruning threshold, Ep, to govern
%  node selection as follows:
%
%    Ep = 0
%    while number of nodes with (n2 > 0) > required maximum number of colors
%      prune all nodes such that E <= Ep
%      Set Ep to minimum E in remaining nodes
%
%  This has the effect of minimizing any quantization error when merging
%  two nodes together.
%
%  When a node to be pruned has offspring, the pruning procedure invokes
%  itself recursively in order to prune the tree from the leaves upward.
%  n2, Sr, Sg, and Sb in a node being pruned are always added to the
%  corresponding data in that node's parent.  This retains the pruned
%  node's color characteristics for later averaging.
%
%  For each node, n2 pixels exist for which that node represents the
%  smallest volume in RGB space containing those pixels' colors.  When n2
%  > 0 the node will uniquely define a color in the output image.  At the
%  beginning of reduction, n2 = 0 for all nodes except the leaves of
%  the tree which represent colors present in the input image.
%
%  The other pixel count, n1, indicates the total number of colors
%  within the cubic volume which the node represents.  This includes n1 -
%  n2 pixels whose colors should be defined by nodes at a lower level in
%  the tree.
%
%  The format of the ReduceImageColors method is:
%
%      ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
% */ static int QuantizeErrorCompare(const void *error_p,const void *error_q) { double *p, *q; p=(double *) error_p; q=(double *) error_q; if (*p > *q) return(1); if (fabs(*q-*p) <= MagickEpsilon) return(0); return(-1); } static void ReduceImageColors(const Image *image,CubeInfo *cube_info) { #define ReduceImageTag "Reduce/Image" MagickBooleanType proceed; MagickOffsetType offset; size_t span; cube_info->next_threshold=0.0; if (cube_info->colors > cube_info->maximum_colors) { double *quantize_error; /* Enable rapid reduction of the number of unique colors. */ quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes, sizeof(*quantize_error)); if (quantize_error != (double *) NULL) { (void) QuantizeErrorFlatten(cube_info,cube_info->root,0, quantize_error); qsort(quantize_error,cube_info->nodes,sizeof(double), QuantizeErrorCompare); if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100)) cube_info->next_threshold=quantize_error[cube_info->nodes-110* (cube_info->maximum_colors+1)/100]; quantize_error=(double *) RelinquishMagickMemory(quantize_error); } } for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; ) { cube_info->pruning_threshold=cube_info->next_threshold; cube_info->next_threshold=cube_info->root->quantize_error-1; cube_info->colors=0; Reduce(cube_info,cube_info->root); offset=(MagickOffsetType) span-cube_info->colors; proceed=SetImageProgress(image,ReduceImageTag,offset,span- cube_info->maximum_colors+1); if (proceed == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImage() replaces the colors of an image with the closest of the colors % from the reference image. 
% % The format of the RemapImage method is: % % MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, % Image *image,const Image *remap_image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o remap_image: the reference image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, Image *image,const Image *remap_image,ExceptionInfo *exception) { CubeInfo *cube_info; MagickBooleanType status; /* Initialize color cube. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(remap_image != (Image *) NULL); assert(remap_image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; status=AssignImageColors(image,cube_info,exception); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImages() replaces the colors of a sequence of images with the % closest color from a reference image. 
%
%  The format of the RemapImages method is:
%
%      MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
%        Image *images,Image *remap_image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: the image sequence.
%
%    o remap_image: the reference image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        Create a global colormap for an image sequence.
      */
      status=QuantizeImages(quantize_info,images,exception);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Assign the reference image's colors to every frame in the list.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      image=images;
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t G r a y s c a l e I m a g e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
%  The format of the SetGrayscaleImage method is:
%
%      MagickBooleanType SetGrayscaleImage(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  IntensityCompare() orders two colormap entries by grayscale intensity for
  qsort(); the difference is clamped to [INT_MIN,INT_MAX] before the cast so
  the conversion cannot overflow.
*/
static int IntensityCompare(const void *x,const void *y)
{
  double
    intensity;

  PixelInfo
    *color_1,
    *color_2;

  color_1=(PixelInfo *) x;
  color_2=(PixelInfo *) y;
  intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)-
    GetPixelInfoIntensity((const Image *) NULL,color_2);
  if (intensity < (double) INT_MIN)
    intensity=(double) INT_MIN;
  if (intensity > (double) INT_MAX)
    intensity=(double) INT_MAX;
  return((int) intensity);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

static MagickBooleanType SetGrayscaleImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    *colormap;

  size_t
    extent;

  ssize_t
    *colormap_index,
    i,
    j,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace,exception);
  /*
    colormap_index maps a ScaleQuantumToMap() intensity to a colormap slot.
  */
  extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
  colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->storage_class != PseudoClass)
    {
      /*
        Byte-fill with 0xFF so every ssize_t slot reads as -1 ("unassigned").
      */
      (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
      if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
        {
          colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      image->colors=0;
      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        Quantum
          *magick_restrict q;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          size_t
            intensity;

          intensity=ScaleQuantumToMap(GetPixelRed(image,q));
          if (colormap_index[intensity] < 0)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
              /*
                Re-test inside the critical section: another thread may have
                claimed this intensity slot between the check and the lock.
              */
              if (colormap_index[intensity] < 0)
                {
                  colormap_index[intensity]=(ssize_t) image->colors;
                  image->colormap[image->colors].red=(double)
                    GetPixelRed(image,q);
                  image->colormap[image->colors].green=(double)
                    GetPixelGreen(image,q);
                  image->colormap[image->colors].blue=(double)
                    GetPixelBlue(image,q);
                  image->colors++;
               }
            }
          SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
    }
  /*
    Sort the colormap by intensity (the alpha field temporarily stores each
    entry's original index) and merge duplicate entries.
  */
  (void) memset(colormap_index,0,extent*sizeof(*colormap_index));
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
    IntensityCompare);
  colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap));
  if (colormap == (PixelInfo *) NULL)
    {
      colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  j=0;
  colormap[j]=image->colormap[0];
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    colormap_index[(ssize_t) image->colormap[i].alpha]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  /*
    Rewrite every pixel's index to point at the compacted colormap.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
        GetPixelIndex(image,q))],q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  image->type=GrayscaleType;
  if (SetImageMonochrome(image,exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   S e t I m a g e C o l o r m a p                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColormap() traverses the color cube tree and sets the colormap of
%  the image.  A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.
%
%  The format of the SetImageColormap method is:
%
%      MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  size_t
    number_colors;

  number_colors=MagickMax(cube_info->maximum_colors,cube_info->colors);
  if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  image->colors=0;
  DefineImageColormap(image,cube_info,cube_info->root);
  /*
    Trim the colormap allocation down to the entries actually defined.
  */
  if (image->colors != number_colors)
    {
      image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
        image->colors+1,sizeof(*image->colormap));
      if (image->colormap == (PixelInfo *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  return(MagickTrue);
}
GB_compiler.h
//------------------------------------------------------------------------------
// GB_compiler.h: handle compiler variations
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

#ifndef GB_COMPILER_H
#define GB_COMPILER_H

//------------------------------------------------------------------------------
// compiler variations
//------------------------------------------------------------------------------

// Determine the restrict keyword, and whether or not variable-length arrays
// are supported.

#if ( _MSC_VER && !__INTEL_COMPILER )

    // Microsoft Visual Studio does not have the restrict keyword, but it does
    // support __restrict, which is equivalent.  Variable-length arrays are
    // not supported.  OpenMP tasks are not available, GraphBLAS no longer
    // uses OpenMP tasks.

    #define GB_MICROSOFT 1
    #define GB_RESTRICT __restrict
    #define GB_HAS_VLA 0

#elif GxB_STDC_VERSION >= 199901L

    // ANSI C99 and later have the restrict keyword and variable-length arrays.
    #define GB_MICROSOFT 0
    #define GB_RESTRICT restrict
    #define GB_HAS_VLA 1

#else

    // ANSI C95 and earlier have neither
    #define GB_MICROSOFT 0
    #define GB_RESTRICT
    #define GB_HAS_VLA 0

#endif

//------------------------------------------------------------------------------
// Microsoft specific include files
//------------------------------------------------------------------------------

// malloc.h is required on MSVC (e.g. for _alloca-style allocation).
#if GB_MICROSOFT
#include <malloc.h>
#endif

//------------------------------------------------------------------------------
// PGI_COMPILER_BUG
//------------------------------------------------------------------------------

// If GraphBLAS is compiled with -DPGI_COMPILER_BUG, then a workaround is
// enabled for a bug in the PGI compiler.  The compiler does not correctly
// handle automatic arrays of variable size.

#ifdef PGI_COMPILER_BUG

    // override the ANSI C compiler to turn off variable-length arrays
    #undef  GB_HAS_VLA
    #define GB_HAS_VLA 0

#endif

//------------------------------------------------------------------------------
// OpenMP pragmas and tasks
//------------------------------------------------------------------------------

// GB_PRAGMA(x) becomes "#pragma x", but the way to do this depends on the
// compiler:
#if GB_MICROSOFT
    // MS Visual Studio is not ANSI C11 compliant, and uses __pragma:
    #define GB_PRAGMA(x) __pragma (x)
#else
    // ANSI C11 compilers use _Pragma:
    #define GB_PRAGMA(x) _Pragma (#x)
#endif

// construct pragmas for loop vectorization:
#if GB_MICROSOFT

    // no #pragma omp simd is available in MS Visual Studio
    #define GB_PRAGMA_SIMD
    #define GB_PRAGMA_SIMD_REDUCTION(op,s)

#else

    // create two kinds of SIMD pragmas:
    // GB_PRAGMA_SIMD becomes "#pragma omp simd"
    // GB_PRAGMA_SIMD_REDUCTION (+,cij) becomes
    // "#pragma omp simd reduction(+:cij)"
    #define GB_PRAGMA_SIMD GB_PRAGMA (omp simd)
    #define GB_PRAGMA_SIMD_REDUCTION(op,s) GB_PRAGMA (omp simd reduction(op:s))

#endif

// compiler-specific vectorization hint, independent of OpenMP:
#define GB_PRAGMA_IVDEP GB_PRAGMA(ivdep)

//------------------------------------------------------------------------------
// variable-length arrays
//------------------------------------------------------------------------------

// If variable-length arrays are not supported, user-defined types are limited
// in size to 128 bytes or less.  Many of the type-generic routines allocate
// workspace for a single scalar of variable size, using a statement:
//
//      GB_void aij [xsize] ;
//
// To support non-variable-length arrays in ANSI C95 or earlier, this is used:
//
//      GB_void aij [GB_VLA(xsize)] ;
//
// GB_VLA(xsize) is either defined as xsize (for ANSI C99 or later), or a fixed
// size of 128, in which case user-defined types
// are limited to a max of 128 bytes.

#if ( GB_HAS_VLA )

    // variable-length arrays are allowed
    #define GB_VLA(s) s

#else

    // variable-length arrays are not allowed
    #define GB_VLA_MAXSIZE 128
    #define GB_VLA(s) GB_VLA_MAXSIZE

#endif
#endif
facedist.c
/* Generated by Cython 0.24 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000) #error Cython requires Python 2.6+ or Python 3.2+. #else #define CYTHON_ABI "0_24" #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if !defined(CYTHON_USE_PYLONG_INTERNALS) && CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x02070000 #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define 
__Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define 
PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG 
PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #elif CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define 
CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__facedist #define __PYX_HAVE_API__facedist #include "string.h" #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "math.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) && defined (_M_X64) #define __Pyx_sst_abs(value) _abs64(value) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) #define 
__Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || 
memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; 
static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; /* None.proto */ #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "facedist.pyx", "__init__.pxd", "type.pxd", }; /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":725 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":727 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":728 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":732 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":734 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* 
"../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":735 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":739 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":740 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":749 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":750 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* 
"../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":753 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":754 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":757 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":760 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":761 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* 
"../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":764 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":765 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): 
*/ typedef npy_cdouble __pyx_t_5numpy_complex_t; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define 
__Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* ArgTypeTest.proto */ static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /* GetModuleGlobalName.proto */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /* None.proto */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* ExtTypeTest.proto */ static CYTHON_INLINE 
int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* BufferFormatCheck.proto */ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); // PROTO /* ForceInitThreads.proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject 
*o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); #define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) /* PyThreadStateGet.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = PyThreadState_GET(); #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #endif /* PyErrFetchRestore.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* DictGetItem.proto */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { 
PyObject *value; value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { PyObject* args = PyTuple_Pack(1, key); if (likely(args)) PyErr_SetObject(PyExc_KeyError, args); Py_XDECREF(args); } return NULL; } Py_INCREF(value); return value; } #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define 
__Pyx_ReleaseBuffer PyBuffer_Release #endif /* None.proto */ static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(__cplusplus) && CYTHON_CCOMPLEX && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif /* None.proto */ static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); /* None.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex 
__Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif /* None.proto */ static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); /* None.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE 
__pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* PyIdentifierFromString.proto */ #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif /* ModuleImport.proto */ static PyObject *__Pyx_ImportModule(const char *name); /* TypeImport.proto */ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject 
*__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'cython' */ /* Module declarations from 'libc.math' */ /* Module declarations from 'facedist' */ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t = { "float64_t", NULL, sizeof(__pyx_t_5numpy_float64_t), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "facedist" int __pyx_module_is_main_facedist = 0; /* Implementation of 'facedist' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static const char __pyx_k_A[] = "A"; static const char __pyx_k_D[] = "D"; static const char __pyx_k_ii[] = "ii"; static const char __pyx_k_jj[] = "jj"; static const char __pyx_k_nn[] = "nn"; static const char __pyx_k_np[] = "np"; static const char __pyx_k_rd[] = "rd"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mean[] = "mean"; static const char __pyx_k_ncol[] = "ncol"; static const char __pyx_k_nrow[] = "nrow"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_cdist[] = "cdist"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_zeros[] = "zeros"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_metric[] = "metric"; static const char __pyx_k_float64[] = "float64"; static const char __pyx_k_facedist[] = "facedist"; static const char __pyx_k_euclidean[] = "euclidean"; static const char __pyx_k_mean_dist[] = "mean_dist"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_dok_matrix[] = "dok_matrix"; static const char __pyx_k_RuntimeError[] = "RuntimeError"; static const char __pyx_k_scipy_sparse[] = "scipy.sparse"; static const char 
__pyx_k_scipy_spatial_distance[] = "scipy.spatial.distance"; static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static const char __pyx_k_home_mlode_BA_Moritz_Implementa[] = "/home/mlode/BA-Moritz/Implementation/src/face_recognition/cython_sparse_arr/facedist.pyx"; static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static PyObject *__pyx_n_s_A; static PyObject *__pyx_n_s_D; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_cdist; static PyObject *__pyx_n_s_dok_matrix; static PyObject *__pyx_n_s_euclidean; static PyObject *__pyx_n_s_facedist; static PyObject *__pyx_n_s_float64; static PyObject *__pyx_kp_s_home_mlode_BA_Moritz_Implementa; static PyObject *__pyx_n_s_ii; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_jj; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_mean; static PyObject *__pyx_n_s_mean_dist; static PyObject *__pyx_n_s_metric; static PyObject *__pyx_n_s_ncol; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_nn; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_nrow; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_rd; static 
PyObject *__pyx_n_s_scipy_sparse; static PyObject *__pyx_n_s_scipy_spatial_distance; static PyObject *__pyx_n_s_test; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_zeros; static PyObject *__pyx_pf_8facedist_mean_dist(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_A); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_codeobj__8; /* "facedist.pyx":21 * # indices. * @cython.wraparound(False) * def mean_dist(np.ndarray A): # <<<<<<<<<<<<<< * * # declare C types for as many of our variables as possible. 
note that we */

/* NOTE(review): everything below is machine-generated by Cython from
 * facedist.pyx (see the "facedist.pyx":NN banner comments).  Do not edit
 * this C by hand -- change the .pyx source and regenerate.  The comments
 * added here only annotate the generated structure for readers. */

/* Python wrapper for mean_dist: METH_O entry point that type-checks the
 * single argument (must be a numpy.ndarray) and forwards to the
 * implementation function below. */
/* Python wrapper */
static PyObject *__pyx_pw_8facedist_1mean_dist(PyObject *__pyx_self, PyObject *__pyx_v_A); /*proto*/
static PyMethodDef __pyx_mdef_8facedist_1mean_dist = {"mean_dist", (PyCFunction)__pyx_pw_8facedist_1mean_dist, METH_O, 0};
static PyObject *__pyx_pw_8facedist_1mean_dist(PyObject *__pyx_self, PyObject *__pyx_v_A) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("mean_dist (wrapper)", 0);
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_A), __pyx_ptype_5numpy_ndarray, 1, "A", 0))) __PYX_ERR(0, 21, __pyx_L1_error)
  __pyx_r = __pyx_pf_8facedist_mean_dist(__pyx_self, ((PyArrayObject *)__pyx_v_A));

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of facedist.mean_dist(A): for every pair (ii, jj) with
 * ii < jj it stores np.mean(cdist(A[ii], A[jj], metric='euclidean')) into
 * a condensed 1-D float64 result D at index ii + jj*(jj-1)/2, with the ii
 * loop distributed over an OpenMP prange (24 threads, static schedule).
 * Returns D.  NOTE(review): each inner iteration re-acquires the GIL for
 * the numpy/scipy calls (the pyx uses `with gil`), so the Python-level
 * work is effectively serialized across threads -- confirm intent in the
 * .pyx if parallel speedup is expected. */
static PyObject *__pyx_pf_8facedist_mean_dist(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_A) {
  Py_ssize_t __pyx_v_nrow;
  CYTHON_UNUSED Py_ssize_t __pyx_v_ncol;
  Py_ssize_t __pyx_v_ii;
  Py_ssize_t __pyx_v_jj;
  Py_ssize_t __pyx_v_nn;
  PyArrayObject *__pyx_v_D = 0;
  __pyx_t_5numpy_float64_t __pyx_v_rd;
  __Pyx_LocalBuf_ND __pyx_pybuffernd_D;
  __Pyx_Buffer __pyx_pybuffer_D;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  Py_ssize_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  PyArrayObject *__pyx_t_8 = NULL;
  Py_ssize_t __pyx_t_9;
  Py_ssize_t __pyx_t_10;
  Py_ssize_t __pyx_t_11;
  Py_ssize_t __pyx_t_12;
  __pyx_t_5numpy_float64_t __pyx_t_13;
  Py_ssize_t __pyx_t_14;
  __Pyx_RefNannySetupContext("mean_dist", 0);
  __pyx_pybuffer_D.pybuffer.buf = NULL;
  __pyx_pybuffer_D.refcount = 0;
  __pyx_pybuffernd_D.data = NULL;
  __pyx_pybuffernd_D.rcbuffer = &__pyx_pybuffer_D;

  /* "facedist.pyx":27   Py_ssize_t nrow = A.shape[0] */
  __pyx_v_nrow = (__pyx_v_A->dimensions[0]);

  /* "facedist.pyx":28   Py_ssize_t ncol = A.shape[1]   (ncol is unused below) */
  __pyx_v_ncol = (__pyx_v_A->dimensions[1]);

  /* "facedist.pyx":33
   *   np.ndarray[np.float64_t, ndim=1] D = np.zeros( (nrow*(nrow-1)/2), np.float64)
   * Allocate the condensed result: one slot per unordered pair. */
  __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 33, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 33, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_2 = PyInt_FromSsize_t(__Pyx_div_Py_ssize_t((__pyx_v_nrow * (__pyx_v_nrow - 1)), 2)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 33, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 33, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 33, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = NULL;
  __pyx_t_6 = 0;
  /* If np.zeros resolved to a bound method, unpack self and shift the
   * positional arguments by one (standard Cython call optimization). */
  if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) {
    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
    if (likely(__pyx_t_4)) {
      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_4);
      __Pyx_INCREF(function);
      __Pyx_DECREF_SET(__pyx_t_3, function);
      __pyx_t_6 = 1;
    }
  }
  __pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 33, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (__pyx_t_4) {
    __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_4); __pyx_t_4 = NULL;
  }
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_5);
  PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, __pyx_t_5);
  __pyx_t_2 = 0;
  __pyx_t_5 = 0;
  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 33, __pyx_L1_error)
  __pyx_t_8 = ((PyArrayObject *)__pyx_t_1);
  /* Acquire a writable 1-D float64 buffer view of D for direct indexing. */
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_D.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
      __pyx_v_D = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_D.rcbuffer->pybuffer.buf = NULL;
      __PYX_ERR(0, 33, __pyx_L1_error)
    } else {__pyx_pybuffernd_D.diminfo[0].strides = __pyx_pybuffernd_D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_D.diminfo[0].shape = __pyx_pybuffernd_D.rcbuffer->pybuffer.shape[0];
    }
  }
  __pyx_t_8 = 0;
  __pyx_v_D = ((PyArrayObject *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "facedist.pyx":42
   *   for ii in prange(nrow, nogil=True, schedule='static', num_threads=24):
   * The GIL is released for the whole parallel region; per-thread error
   * state is funneled through the __pyx_parallel_* shared variables. */
  {
      #ifdef WITH_THREAD
      PyThreadState *_save;
      Py_UNBLOCK_THREADS
      #endif
      /*try:*/ {
        __pyx_t_6 = __pyx_v_nrow;
        if (1 == 0) abort();
        {
            /* 0xbad0bad0 / NaN sentinels mark "never assigned" lastprivates. */
            Py_ssize_t __pyx_parallel_temp0 = 0xbad0bad0;
            Py_ssize_t __pyx_parallel_temp1 = 0xbad0bad0;
            Py_ssize_t __pyx_parallel_temp2 = 0xbad0bad0;
            __pyx_t_5numpy_float64_t __pyx_parallel_temp3 = __PYX_NAN();
            const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
            PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
            int __pyx_parallel_why;
            __pyx_parallel_why = 0;
            /* Old Apple gcc miscompiles __builtin_expect inside OpenMP regions;
             * temporarily neutralize likely/unlikely there. */
            #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
                #undef likely
                #undef unlikely
                #define likely(x)   (x)
                #define unlikely(x) (x)
            #endif
            __pyx_t_10 = (__pyx_t_6 - 0 + 1 - 1/abs(1)) / 1;
            if (__pyx_t_10 > 0)
            {
                #ifdef _OPENMP
                #pragma omp parallel num_threads(24) private(__pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14) firstprivate(__pyx_t_1, __pyx_t_2, __pyx_t_3, __pyx_t_4, __pyx_t_5, __pyx_t_7) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
                #endif /* _OPENMP */
                {
                    #ifdef _OPENMP
                    #ifdef WITH_THREAD
                    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
                    #endif
                    Py_BEGIN_ALLOW_THREADS
                    #endif /* _OPENMP */
                    #ifdef _OPENMP
                    #pragma omp for firstprivate(__pyx_v_ii) lastprivate(__pyx_v_ii) lastprivate(__pyx_v_jj) lastprivate(__pyx_v_nn) lastprivate(__pyx_v_rd) schedule(static)
                    #endif /* _OPENMP */
                    for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_10; __pyx_t_9++){
                        if (__pyx_parallel_why < 2)
                        {
                            __pyx_v_ii = (Py_ssize_t)(0 + 1 * __pyx_t_9);
                            /* Initialize private variables to invalid values */
                            __pyx_v_jj = ((Py_ssize_t)0xbad0bad0);
                            __pyx_v_nn = ((Py_ssize_t)0xbad0bad0);
                            __pyx_v_rd = ((__pyx_t_5numpy_float64_t)__PYX_NAN());

                            /* "facedist.pyx":43   for jj in range(ii + 1, nrow): */
                            __pyx_t_11 = __pyx_v_nrow;
                            for (__pyx_t_12 = (__pyx_v_ii + 1); __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
                                __pyx_v_jj = __pyx_t_12;

                                /* "facedist.pyx":45   with gil:
                                 * Re-acquire the GIL for the numpy/scipy calls below. */
                                {
                                    #ifdef WITH_THREAD
                                    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
                                    #endif
                                    /*try:*/ {

                                      /* "facedist.pyx":46   rd = np.mean(cdist(A[ii], A[jj], metric='euclidean')) */
                                      __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 46, __pyx_L15_error)
                                      __Pyx_GOTREF(__pyx_t_3);
                                      __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_mean); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 46, __pyx_L15_error)
                                      __Pyx_GOTREF(__pyx_t_7);
                                      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
                                      __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_cdist); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 46, __pyx_L15_error)
                                      __Pyx_GOTREF(__pyx_t_3);
                                      __pyx_t_5 = __Pyx_GetItemInt(((PyObject *)__pyx_v_A), __pyx_v_ii, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 46, __pyx_L15_error)
                                      __Pyx_GOTREF(__pyx_t_5);
                                      __pyx_t_2 = __Pyx_GetItemInt(((PyObject *)__pyx_v_A), __pyx_v_jj, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 46, __pyx_L15_error)
                                      __Pyx_GOTREF(__pyx_t_2);
                                      __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 46, __pyx_L15_error)
                                      __Pyx_GOTREF(__pyx_t_4);
                                      __Pyx_GIVEREF(__pyx_t_5);
                                      PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
                                      __Pyx_GIVEREF(__pyx_t_2);
                                      PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
                                      __pyx_t_5 = 0;
                                      __pyx_t_2 = 0;
                                      /* keyword dict for metric='euclidean' */
                                      __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 46, __pyx_L15_error)
                                      __Pyx_GOTREF(__pyx_t_2);
                                      if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_metric, __pyx_n_s_euclidean) < 0) __PYX_ERR(0, 46, __pyx_L15_error)
                                      __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 46, __pyx_L15_error)
                                      __Pyx_GOTREF(__pyx_t_5);
                                      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
                                      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
                                      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
                                      __pyx_t_2 = NULL;
                                      /* bound-method unpacking for np.mean, as for np.zeros above */
                                      if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_7))) {
                                        __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_7);
                                        if (likely(__pyx_t_2)) {
                                          PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
                                          __Pyx_INCREF(__pyx_t_2);
                                          __Pyx_INCREF(function);
                                          __Pyx_DECREF_SET(__pyx_t_7, function);
                                        }
                                      }
                                      if (!__pyx_t_2) {
                                        __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 46, __pyx_L15_error)
                                        __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
                                        __Pyx_GOTREF(__pyx_t_1);
                                      } else {
                                        __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 46, __pyx_L15_error)
                                        __Pyx_GOTREF(__pyx_t_4);
                                        __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __pyx_t_2 = NULL;
                                        __Pyx_GIVEREF(__pyx_t_5);
                                        PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_5);
                                        __pyx_t_5 = 0;
                                        __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 46, __pyx_L15_error)
                                        __Pyx_GOTREF(__pyx_t_1);
                                        __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
                                      }
                                      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
                                      __pyx_t_13 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_13 == (npy_float64)-1) && PyErr_Occurred())) __PYX_ERR(0, 46, __pyx_L15_error)
                                      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
                                      __pyx_v_rd = __pyx_t_13;

                                      /* "facedist.pyx":47   nn = ii+jj*(jj-1)/2
                                       * condensed (upper-triangle) index for the pair (ii, jj), ii < jj */
                                      __pyx_v_nn = (__pyx_v_ii + __Pyx_div_Py_ssize_t((__pyx_v_jj * (__pyx_v_jj - 1)), 2));
                                    }

                                    /* Release the GIL again on both the normal and the error path. */
                                    /*finally:*/ {
                                        /*normal exit:*/{
                                          #ifdef WITH_THREAD
                                          PyGILState_Release(__pyx_gilstate_save);
                                          #endif
                                          goto __pyx_L16;
                                        }
                                        __pyx_L15_error: {
                                          #ifdef WITH_THREAD
                                          PyGILState_Release(__pyx_gilstate_save);
                                          #endif
                                          goto __pyx_L8_error;
                                        }
                                        __pyx_L16:;
                                    }
                                }

                                /* "facedist.pyx":49   D[nn] = rd   (raw buffer write; no GIL needed) */
                                __pyx_t_14 = __pyx_v_nn;
                                *__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_D.diminfo[0].strides) = __pyx_v_rd;
                            }
                            goto __pyx_L18;
                            /* An exception escaped this iteration: record it once into the
                             * shared __pyx_parallel_* slots and signal the other threads. */
                            __pyx_L8_error:;
                            {
                                #ifdef WITH_THREAD
                                PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
                                #endif
                                #ifdef _OPENMP
                                #pragma omp flush(__pyx_parallel_exc_type)
                                #endif /* _OPENMP */
                                if (!__pyx_parallel_exc_type) {
                                  __Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
                                  __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
                                  __Pyx_GOTREF(__pyx_parallel_exc_type);
                                }
                                #ifdef WITH_THREAD
                                PyGILState_Release(__pyx_gilstate_save);
                                #endif
                            }
                            __pyx_parallel_why = 4;
                            goto __pyx_L17;
                            __pyx_L17:;
                            /* Publish this thread's lastprivate values. */
                            #ifdef _OPENMP
                            #pragma omp critical(__pyx_parallel_lastprivates0)
                            #endif /* _OPENMP */
                            {
                                __pyx_parallel_temp0 = __pyx_v_ii;
                                __pyx_parallel_temp1 = __pyx_v_jj;
                                __pyx_parallel_temp2 = __pyx_v_nn;
                                __pyx_parallel_temp3 = __pyx_v_rd;
                            }
                            __pyx_L18:;
                            #ifdef _OPENMP
                            #pragma omp flush(__pyx_parallel_why)
                            #endif /* _OPENMP */
                        }
                    }
                    #ifdef _OPENMP
                    Py_END_ALLOW_THREADS
                    #else
                    {
                    #ifdef WITH_THREAD
                    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
                    #endif
                    #endif /* _OPENMP */
                    /* Clean up any temporaries */
                    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = NULL;
                    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = NULL;
                    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = NULL;
                    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = NULL;
                    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = NULL;
                    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = NULL;
                    #ifdef WITH_THREAD
                    PyGILState_Release(__pyx_gilstate_save);
                    #endif
                    #ifndef _OPENMP
                    }
                    #endif /* _OPENMP */
                }
            }
            if (__pyx_parallel_exc_type) {
              /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
              __pyx_parallel_why = 4;
            }
            /* After the parallel region: restore lastprivates and re-raise any
             * captured exception on the calling thread. */
            if (__pyx_parallel_why) {
              __pyx_v_ii = __pyx_parallel_temp0;
              __pyx_v_jj = __pyx_parallel_temp1;
              __pyx_v_nn = __pyx_parallel_temp2;
              __pyx_v_rd = __pyx_parallel_temp3;
              switch (__pyx_parallel_why) {
                    case 4:
                    {
                        #ifdef WITH_THREAD
                        PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
                        #endif
                        __Pyx_GIVEREF(__pyx_parallel_exc_type);
                        __Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
                        __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
                        #ifdef WITH_THREAD
                        PyGILState_Release(__pyx_gilstate_save);
                        #endif
                    }
                    goto __pyx_L4_error;
              }
            }
        }
        /* Restore the real likely/unlikely definitions (see #undef above). */
        #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
            #undef likely
            #undef unlikely
            #define likely(x)   __builtin_expect(!!(x), 1)
            #define unlikely(x) __builtin_expect(!!(x), 0)
        #endif
      }

      /* "facedist.pyx":42 end of prange: re-take the GIL on both exits. */
      /*finally:*/ {
        /*normal exit:*/{
          #ifdef WITH_THREAD
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          #ifdef WITH_THREAD
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }

  /* "facedist.pyx":51   return D */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(((PyObject *)__pyx_v_D));
  __pyx_r = ((PyObject *)__pyx_v_D);
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  /* Release the buffer view of D while preserving the pending exception. */
  { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D.rcbuffer->pybuffer);
    __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
  __Pyx_AddTraceback("facedist.mean_dist", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  goto __pyx_L2;
  __pyx_L0:;
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D.rcbuffer->pybuffer);
  __pyx_L2:;
  __Pyx_XDECREF((PyObject *)__pyx_v_D);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197
 * # experimental exception made for __getbuffer__ and __releasebuffer__
 * # -- the details of this may change.
 * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
 * # This implementation of getbuffer is geared towards Cython
 * # requirements, and does not yet fullfill the PEP.
*/ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = ((__pyx_v_info == NULL) != 0); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":207 * cdef int copy_shape, 
i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":212 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ goto __pyx_L4; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ /*else*/ { __pyx_v_copy_shape = 0; } __pyx_L4:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == 
PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L6_bool_binop_done; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L6_bool_binop_done:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ if (__pyx_t_1) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 218, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 218, __pyx_L1_error) /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") 
* * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L9_bool_binop_done; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L9_bool_binop_done:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ if (__pyx_t_1) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 222, __pyx_L1_error) /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == 
pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":224 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":225 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_1 = (__pyx_v_copy_shape != 0); if (__pyx_t_1) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230 * # This is allocated as one block, strides first. 
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":231 * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_4 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. 
*/ goto __pyx_L11; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */ /*else*/ { __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236 * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L11:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":237 * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":238 * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":242 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef 
dtype descr = self.descr * cdef int offset */ __pyx_v_f = NULL; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":243 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef int offset * */ __pyx_t_3 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_3); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":246 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":248 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L15_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L15_bool_binop_done:; if (__pyx_t_1) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":250 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":248 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ goto __pyx_L14; } /* 
"../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":253 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ /*else*/ { __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L14:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":255 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_1) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_4 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_4; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); if (!__pyx_t_2) { goto __pyx_L20_next_or; } else { } __pyx_t_2 = (__pyx_v_little_endian != 0); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_L20_next_or:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ 
__pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L19_bool_binop_done:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_1) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 259, __pyx_L1_error) /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ switch (__pyx_v_t) { case NPY_BYTE: __pyx_v_f = ((char *)"b"); break; /* 
"../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ case NPY_UBYTE: __pyx_v_f = ((char *)"B"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ case NPY_SHORT: __pyx_v_f = ((char *)"h"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ case NPY_USHORT: __pyx_v_f = ((char *)"H"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ case NPY_INT: __pyx_v_f = ((char *)"i"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ case NPY_UINT: __pyx_v_f = ((char *)"I"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ case NPY_LONG: __pyx_v_f = ((char *)"l"); break; 
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ case NPY_ULONG: __pyx_v_f = ((char *)"L"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ case NPY_LONGLONG: __pyx_v_f = ((char *)"q"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ case NPY_ULONGLONG: __pyx_v_f = ((char *)"Q"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ case NPY_FLOAT: __pyx_v_f = ((char *)"f"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ case NPY_DOUBLE: __pyx_v_f = ((char *)"d"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = 
"Zd" */ case NPY_LONGDOUBLE: __pyx_v_f = ((char *)"g"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ case NPY_CFLOAT: __pyx_v_f = ((char *)"Zf"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ case NPY_CDOUBLE: __pyx_v_f = ((char *)"Zd"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":275 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ case NPY_CLONGDOUBLE: __pyx_v_f = ((char *)"Zg"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ case NPY_OBJECT: __pyx_v_f = ((char *)"O"); break; default: /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":278 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 278, __pyx_L1_error) break; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":279 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":255 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":282 * return * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ /*else*/ { __pyx_v_info->format = ((char *)malloc(0xFF)); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283 * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # 
<<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":284 * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":285 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) __PYX_ERR(1, 285, __pyx_L1_error) __pyx_v_f = __pyx_t_7; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
*/
/* NOTE(review): Cython-generated C (traced to numpy/__init__.pxd under intelpython27).
   Do not hand-edit — regenerate from the .pyx/.pxd sources. Below is the shared
   exit/cleanup tail of numpy.ndarray.__getbuffer__, whose body begins earlier in
   the file: success path returns 0; error path records a traceback, returns -1,
   and drops the reference held in info->obj. */
 /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":290
 * f[0] = c'\0' # Terminate format string
 *
 * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
 * if PyArray_HASFIELDS(self):
 * stdlib.free(info.format)
 */
/* Python wrapper */
/* NOTE(review): bf_releasebuffer entry point — casts the PyObject*/Py_buffer*
   arguments and forwards to the implementation function below. */
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
  __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* NOTE(review): __releasebuffer__ implementation — frees what __getbuffer__
   (above) malloc'ed: info->format when the dtype has fields, and info->strides
   when sizeof(npy_intp) != sizeof(Py_ssize_t) (presumably the condition under
   which __getbuffer__ heap-copied strides/shape — that setup is earlier in the
   file; verify there). info->shape shares the strides allocation, so it is
   intentionally not freed separately. */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  __Pyx_RefNannySetupContext("__releasebuffer__", 0);
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
 *
 * def __releasebuffer__(ndarray self, Py_buffer* info):
 * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
 * stdlib.free(info.format)
 * if sizeof(npy_intp) != sizeof(Py_ssize_t):
 */
  __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
  if (__pyx_t_1) {
    /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292
 * def __releasebuffer__(ndarray self, Py_buffer* info):
 * if PyArray_HASFIELDS(self):
 * stdlib.free(info.format) # <<<<<<<<<<<<<<
 * if sizeof(npy_intp) != sizeof(Py_ssize_t):
 * stdlib.free(info.strides)
 */
    free(__pyx_v_info->format);
    /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
 *
 * def __releasebuffer__(ndarray self, Py_buffer* info):
 * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
 * stdlib.free(info.format)
 * if sizeof(npy_intp) != sizeof(Py_ssize_t):
 */
  }
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":293
 * if PyArray_HASFIELDS(self):
 * stdlib.free(info.format)
 * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
 * stdlib.free(info.strides)
 * # info.shape was stored after info.strides in the same block
 */
  __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
  if (__pyx_t_1) {
    /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":294
 * stdlib.free(info.format)
 * if sizeof(npy_intp) != sizeof(Py_ssize_t):
 * stdlib.free(info.strides) # <<<<<<<<<<<<<<
 * # info.shape was stored after info.strides in the same block
 *
 */
    free(__pyx_v_info->strides);
    /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":293
 * if PyArray_HASFIELDS(self):
 * stdlib.free(info.format)
 * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
 * stdlib.free(info.strides)
 * # info.shape was stored after info.strides in the same block
 */
  }
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":290
 * f[0] = c'\0' # Terminate format string
 *
 * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
 * if PyArray_HASFIELDS(self):
 * stdlib.free(info.format)
 */
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":770
 * ctypedef npy_cdouble complex_t
 *
 * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(1, <void*>a)
 *
 */
/* NOTE(review): the PyArray_MultiIterNewN helpers below are identical
   generated thin wrappers over the NumPy C-API call PyArray_MultiIterNew(N, ...);
   on failure each records a traceback and returns 0 (NULL). */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
 *
 * cdef inline object PyArray_MultiIterNew1(a):
 * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<<
 *
 * cdef inline object PyArray_MultiIterNew2(a, b):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 771, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":770
 * ctypedef npy_cdouble complex_t
 *
 * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(1, <void*>a)
 *
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":773
 * return PyArray_MultiIterNew(1, <void*>a)
 *
 * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(2, <void*>a, <void*>b)
 *
 */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
  /* NOTE(review): Cython-generated — body of PyArray_MultiIterNew2; the 3/4/5-arg
     variants below are mechanical repeats of the same pattern. Do not hand-edit;
     regenerate from the Cython sources. */
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
 *
 * cdef inline object PyArray_MultiIterNew2(a, b):
 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<<
 *
 * cdef inline object PyArray_MultiIterNew3(a, b, c):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 774, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":773
 * return PyArray_MultiIterNew(1, <void*>a)
 *
 * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(2, <void*>a, <void*>b)
 *
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":776
 * return PyArray_MultiIterNew(2, <void*>a, <void*>b)
 *
 * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
 *
 */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
 *
 * cdef inline object PyArray_MultiIterNew3(a, b, c):
 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<<
 *
 * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 777, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":776
 * return PyArray_MultiIterNew(2, <void*>a, <void*>b)
 *
 * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
 *
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":779
 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
 *
 * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
 *
 */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
 *
 * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<<
 *
 * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 780, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":779
 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
 *
 * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
 *
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":782
 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
 *
 * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
 *
 */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
 *
 * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<<
 *
 * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 783, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":782
 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
 *
 * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
 *
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":785
 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
 *
 * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
 * # Recursive utility function used in __getbuffer__ to get format
 * # string. The new location in the format string is returned.
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; long __pyx_t_8; char *__pyx_t_9; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":790 * * cdef dtype child * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":791 * cdef dtype child * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); __PYX_ERR(1, 794, __pyx_L1_error) } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 794, __pyx_L1_error) 
#else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 794, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ if (unlikely(__pyx_v_descr->fields == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 795, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 795, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - <int>(new_offset - offset[0]) < 15: */ if (likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 796, __pyx_L1_error) } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 796, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 796, __pyx_L1_error) } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 796, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (__pyx_t_6) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 799, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 799, 
__pyx_L1_error) /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); if (!__pyx_t_7) { goto __pyx_L8_next_or; } else { } __pyx_t_7 = (__pyx_v_little_endian != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_L8_next_or:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L7_bool_binop_done:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_6) { /* 
"../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 803, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 803, __pyx_L1_error) /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 813, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 813, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 813, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad 
byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 0x78; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 821, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = 
(((__pyx_v_end - __pyx_v_f) < 5) != 0); if (__pyx_t_6) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 823, __pyx_L1_error) /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 826, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 826, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 826, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == 
NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x68; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 829, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); 
__Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 829, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 829, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x69; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { 
(__pyx_v_f[0]) = 73; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 832, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 832, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 832, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x6C; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * 
elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x71; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 835, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 835, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 835, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 836, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 836, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 836, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x66; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x64; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 838, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 838, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = 
__Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 838, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x67; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 839, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 839, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 839, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x66; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 840, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 840, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 840, 
__pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x64; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 841, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 841, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 841, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x67; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 842, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 842, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 842, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; 
goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ /*else*/ { __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 844, __pyx_L1_error) } __pyx_L15:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ goto __pyx_L13; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ /*else*/ { __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) __PYX_ERR(1, 849, __pyx_L1_error) __pyx_v_f = 
__pyx_t_9; } __pyx_L13:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":785 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("set_array_base", 0); /* 
"../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":968 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":968 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ goto __pyx_L3; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = <PyObject*>base * Py_XDECREF(arr.base) */ /*else*/ { Py_INCREF(__pyx_v_base); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = <PyObject*>base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":973 * Py_INCREF(base) # important to do this before decref below! 
* baseptr = <PyObject*>base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974 * baseptr = <PyObject*>base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); if (__pyx_t_1) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":978 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return <object>arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ } /* 
"../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980 * return None * else: * return <object>arr.base # <<<<<<<<<<<<<< */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, #else PyModuleDef_HEAD_INIT, #endif "facedist", 0, /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_A, __pyx_k_A, sizeof(__pyx_k_A), 0, 0, 1, 1}, {&__pyx_n_s_D, __pyx_k_D, sizeof(__pyx_k_D), 0, 0, 1, 1}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_cdist, __pyx_k_cdist, sizeof(__pyx_k_cdist), 0, 0, 1, 1}, {&__pyx_n_s_dok_matrix, __pyx_k_dok_matrix, 
sizeof(__pyx_k_dok_matrix), 0, 0, 1, 1}, {&__pyx_n_s_euclidean, __pyx_k_euclidean, sizeof(__pyx_k_euclidean), 0, 0, 1, 1}, {&__pyx_n_s_facedist, __pyx_k_facedist, sizeof(__pyx_k_facedist), 0, 0, 1, 1}, {&__pyx_n_s_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 0, 1, 1}, {&__pyx_kp_s_home_mlode_BA_Moritz_Implementa, __pyx_k_home_mlode_BA_Moritz_Implementa, sizeof(__pyx_k_home_mlode_BA_Moritz_Implementa), 0, 0, 1, 0}, {&__pyx_n_s_ii, __pyx_k_ii, sizeof(__pyx_k_ii), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_jj, __pyx_k_jj, sizeof(__pyx_k_jj), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_mean, __pyx_k_mean, sizeof(__pyx_k_mean), 0, 0, 1, 1}, {&__pyx_n_s_mean_dist, __pyx_k_mean_dist, sizeof(__pyx_k_mean_dist), 0, 0, 1, 1}, {&__pyx_n_s_metric, __pyx_k_metric, sizeof(__pyx_k_metric), 0, 0, 1, 1}, {&__pyx_n_s_ncol, __pyx_k_ncol, sizeof(__pyx_k_ncol), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_nn, __pyx_k_nn, sizeof(__pyx_k_nn), 0, 0, 1, 1}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_nrow, __pyx_k_nrow, sizeof(__pyx_k_nrow), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_rd, __pyx_k_rd, sizeof(__pyx_k_rd), 0, 0, 1, 1}, {&__pyx_n_s_scipy_sparse, __pyx_k_scipy_sparse, sizeof(__pyx_k_scipy_sparse), 0, 0, 1, 1}, {&__pyx_n_s_scipy_spatial_distance, __pyx_k_scipy_spatial_distance, sizeof(__pyx_k_scipy_spatial_distance), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, 
__pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 43, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 218, __pyx_L1_error) __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 799, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 218, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* 
"../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 799, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 803, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ 
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "facedist.pyx":21 * # indices. * @cython.wraparound(False) * def mean_dist(np.ndarray A): # <<<<<<<<<<<<<< * * # declare C types for as many of our variables as possible. note that we */ __pyx_tuple__7 = PyTuple_Pack(8, __pyx_n_s_A, __pyx_n_s_nrow, __pyx_n_s_ncol, __pyx_n_s_ii, __pyx_n_s_jj, __pyx_n_s_nn, __pyx_n_s_D, __pyx_n_s_rd); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(1, 0, 8, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_mlode_BA_Moritz_Implementa, __pyx_n_s_mean_dist, 21, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initfacedist(void); /*proto*/ PyMODINIT_FUNC initfacedist(void) #else PyMODINIT_FUNC PyInit_facedist(void); /*proto*/ PyMODINIT_FUNC PyInit_facedist(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC 
PyInit_facedist(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("facedist", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_facedist) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "facedist")) { if (unlikely(PyDict_SetItemString(modules, "facedist", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error) __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 155, __pyx_L1_error) __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 168, __pyx_L1_error) __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 172, __pyx_L1_error) __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 181, 
__pyx_L1_error) __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 861, __pyx_L1_error) /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "facedist.pyx":1 * import numpy as np # <<<<<<<<<<<<<< * from scipy.spatial.distance import cdist * from scipy.sparse import dok_matrix */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "facedist.pyx":2 * import numpy as np * from scipy.spatial.distance import cdist # <<<<<<<<<<<<<< * from scipy.sparse import dok_matrix * cimport numpy as np */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_cdist); __Pyx_GIVEREF(__pyx_n_s_cdist); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_cdist); __pyx_t_2 = __Pyx_Import(__pyx_n_s_scipy_spatial_distance, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_cdist); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_cdist, __pyx_t_1) < 0) __PYX_ERR(0, 2, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "facedist.pyx":3 * import numpy as np * from scipy.spatial.distance import cdist * from scipy.sparse import dok_matrix # <<<<<<<<<<<<<< * cimport numpy as np * cimport cython */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); 
__Pyx_INCREF(__pyx_n_s_dok_matrix); __Pyx_GIVEREF(__pyx_n_s_dok_matrix); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_dok_matrix); __pyx_t_1 = __Pyx_Import(__pyx_n_s_scipy_sparse, __pyx_t_2, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_dok_matrix); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_dok_matrix, __pyx_t_2) < 0) __PYX_ERR(0, 3, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "facedist.pyx":21 * # indices. * @cython.wraparound(False) * def mean_dist(np.ndarray A): # <<<<<<<<<<<<<< * * # declare C types for as many of our variables as possible. note that we */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_8facedist_1mean_dist, NULL, __pyx_n_s_facedist); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_mean_dist, __pyx_t_1) < 0) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "facedist.pyx":1 * import numpy as np # <<<<<<<<<<<<<< * from scipy.spatial.distance import cdist * from scipy.sparse import dok_matrix */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init facedist", __pyx_clineno, __pyx_lineno, __pyx_filename); } 
Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init facedist"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* ArgTypeTest */ static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); } static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (likely(Py_TYPE(obj) == type)) return 1; #if PY_MAJOR_VERSION == 2 else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(PyObject_TypeCheck(obj, type))) return 1; } __Pyx_RaiseArgumentTypeInvalid(name, obj, type); return 0; } /* GetModuleGlobalName */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if 
CYTHON_COMPILING_IN_CPYTHON result = PyDict_GetItem(__pyx_d, name); if (likely(result)) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } /* None */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { Py_ssize_t q = a / b; Py_ssize_t r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* BufferFormatCheck */ static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; 
ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 
8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return 
sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. */ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static 
CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian 
compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': 
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } /* GetItemInt */ static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, 
__pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* PyErrFetchRestore */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate 
value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX 
>= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { 
PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; 
entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; 
} static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? c_line : py_line, py_code); } py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; } Py_DECREF(obj); view->obj = NULL; } #endif /* None */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif /* None */ #if CYTHON_CCOMPLEX #else 
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real 
/ denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif /* None */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif /* None */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static 
CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 
0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(enum NPY_TYPES) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(enum NPY_TYPES) <= 
sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(enum NPY_TYPES) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * 
sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << 
PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | 
(int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int 
little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 
* sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << 
PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { 
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* ModuleImport */ #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif /* TypeImport */ #ifndef 
__PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", module_name, class_name, basicsize, size); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling. 
Expected %zd, got %zd", module_name, class_name, basicsize, size); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { #if PY_VERSION_HEX < 0x03030000 char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; #else if (__Pyx_PyUnicode_READY(o) == 
-1) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (PyUnicode_IS_ASCII(o)) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif #endif } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return __Pyx_NewRef(x); m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_MAJOR_VERSION < 3 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(x); } #endif if 
(likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
SpatialFractionalMaxPooling.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialFractionalMaxPooling.c"
#else

/* Generates the sequence of window start offsets ("pseudorandom intervals")
 * for one spatial dimension of fractional max pooling.
 * sample is a per-plane random value used to jitter the interval positions;
 * the last window is pinned so it ends exactly at the input border.
 * Returns a THAlloc'd buffer of outputSize offsets; the caller must THFree it. */
static int64_t* THNN_(SpatialFractionalMaxPooling_generateIntervals)(
  real sample,
  int64_t inputSize,
  int64_t outputSize,
  int poolSize) {
  int64_t* sequence = (int64_t*) THAlloc(sizeof(int64_t) * outputSize);
  if (outputSize > 1) {
    /* alpha is the fractional stride between consecutive window starts.
     * Guarded: with outputSize == 1 the original expression divided by zero
     * (the result was unused, but the division is still a latent trap). */
    real alpha = (real) (inputSize - poolSize) / (real) (outputSize - 1);
    int64_t i;
    for (i = 0; i < outputSize - 1; ++i) {
      sequence[i] =
        (int64_t) ((i + sample) * alpha) - (int64_t) (sample * alpha);
    }
  }
  sequence[outputSize - 1] = inputSize - poolSize;
  return sequence;
}

/* Pools one (possibly batched-slice) frame: for each plane, draws the W/H
 * interval sequences from the plane's two random samples, then writes the
 * max value and its (1-based via TH_INDEX_BASE) flat input index for every
 * output location. Planes are independent, hence the omp parallel for. */
static void THNN_(SpatialFractionalMaxPooling_updateOutput_frame)(
  real* input,
  real* output,
  THIndex_t* indices,
  real* randomSamples,
  int64_t numPlanes,
  int64_t inputW, int64_t inputH,
  int64_t outputW, int64_t outputH,
  int poolSizeW, int poolSizeH) {
  int64_t plane;
#pragma omp parallel for private(plane)
  for (plane = 0; plane < numPlanes; ++plane) {
    /* each plane contains 2 random samples, one for W and one for H */
    real* randomSamplesForPlane = randomSamples + plane * 2;

    /* Generate interval sequence */
    int64_t* sequenceW = THNN_(SpatialFractionalMaxPooling_generateIntervals)(
      randomSamplesForPlane[0], inputW, outputW, poolSizeW);
    int64_t* sequenceH = THNN_(SpatialFractionalMaxPooling_generateIntervals)(
      randomSamplesForPlane[1], inputH, outputH, poolSizeH);

    /* loop over output */
    int64_t h, w;

    real* inputForPlane = input + plane * inputW * inputH;
    real* outputForPlane = output + plane * outputW * outputH;
    THIndex_t* indicesForPlane = indices + plane * outputW * outputH;

    for (h = 0; h < outputH; ++h) {
      int64_t inputHStart = sequenceH[h];

      for (w = 0; w < outputW; ++w) {
        int64_t inputWStart = sequenceW[w];

        real maxVal = -THInf;
        int64_t maxIndex = -1;

        /* scan the poolSizeH x poolSizeW window for the maximum */
        int64_t h2, w2;
        for (h2 = inputHStart; h2 < inputHStart + poolSizeH; ++h2) {
          for (w2 = inputWStart; w2 < inputWStart + poolSizeW; ++w2) {
            THAssert(h2 >= 0 && h2 < inputH);
            THAssert(w2 >= 0 && w2 < inputW);

            int64_t planeIndex = h2 * inputW + w2;
            real val = inputForPlane[planeIndex];
            if (val > maxVal) {
              maxVal = val;
              maxIndex = planeIndex;
            }
          }
        }

        THAssert(maxVal != -THInf);
        THAssert(maxIndex != -1);

        outputForPlane[h * outputW + w] = maxVal;
        /* +1 to lua index */
        indicesForPlane[h * outputW + w] = maxIndex + TH_INDEX_BASE;
      }
    }

    THFree(sequenceW);
    THFree(sequenceH);
  }
}

/* Forward pass. Accepts a non-empty 3D (C,H,W) or 4D (N,C,H,W) input,
 * resizes output/indices accordingly, and pools each frame (each batch
 * element in parallel for the 4D case). randomSamples supplies two values
 * per plane (and per batch element in batch mode). */
void THNN_(SpatialFractionalMaxPooling_updateOutput)(
    THNNState *state,
    THTensor *input,
    THTensor *output,
    int outputW, int outputH,
    int poolSizeW, int poolSizeH,
    THIndexTensor *indices,
    THTensor *randomSamples) {

  int64_t numBatch = 1;
  int planeDim = 0;
  int heightDim = 1;
  int widthDim = 2;

  int64_t numInputDims = THTensor_(nDimensionLegacyNoScalars)(input);
  THNN_ARGCHECK(!input->is_empty() && (numInputDims == 3 || numInputDims == 4), 2, input,
                "non-empty 3D or 4D (batch mode) tensor expected for input, but got: %s");

  if (numInputDims == 4) {
    numBatch = THTensor_(size)(input, 0);
    planeDim++;
    heightDim++;
    widthDim++;
  }

  /* sizes */
  int64_t numPlanes = THTensor_(size)(input, planeDim);
  int64_t inputH = THTensor_(size)(input, heightDim);
  int64_t inputW = THTensor_(size)(input, widthDim);

  /* Cast the int64_t sizes to int: %d with an int64_t vararg is undefined
   * behavior on LP64 platforms (the original code passed inputH/inputW
   * directly). The sizes fit in int whenever the check message matters. */
  THArgCheck(outputH + poolSizeH - 1 <= inputH, 7,
             "poolSizeH (%d) too large relative to input height (%d)",
             poolSizeH, (int) inputH);
  THArgCheck(outputW + poolSizeW - 1 <= inputW, 6,
             "poolSizeW (%d) too large relative to input width (%d)",
             poolSizeW, (int) inputW);

  /* get contiguous input */
  input = THTensor_(newContiguous)(input);

  if (numInputDims == 3) {
    /* resize output */
    THTensor_(resize3d)(output, numPlanes, outputH, outputW);
    /* indices will contain the locations for each output point */
    THIndexTensor_(resize3d)(indices, numPlanes, outputH, outputW);

    THNN_(SpatialFractionalMaxPooling_updateOutput_frame)(
      THTensor_(data)(input),
      THTensor_(data)(output),
      THIndexTensor_(data)(indices),
      THTensor_(data)(randomSamples),
      numPlanes, inputW, inputH, outputW, outputH, poolSizeW, poolSizeH);
  } else {
    THTensor_(resize4d)(output, numBatch, numPlanes, outputH, outputW);
    /* indices will contain the locations for each output point */
    THIndexTensor_(resize4d)(indices, numBatch, numPlanes, outputH, outputW);

    int64_t batch;
#pragma omp parallel for private(batch)
    for (batch = 0; batch < numBatch; ++batch) {
      THNN_(SpatialFractionalMaxPooling_updateOutput_frame)(
        THTensor_(data)(input) + batch * numPlanes * inputH * inputW,
        THTensor_(data)(output) + batch * numPlanes * outputH * outputW,
        THIndexTensor_(data)(indices) + batch * numPlanes * outputH * outputW,
        THTensor_(data)(randomSamples) + batch * numPlanes * 2,
        numPlanes, inputW, inputH, outputW, outputH, poolSizeW, poolSizeH);
    }
  }

  /* cleanup */
  THTensor_(free)(input);
}

/* Backward pass for one frame: scatter-adds each output gradient into the
 * input location recorded in indices (stored with TH_INDEX_BASE offset).
 * gradInput must already be zeroed by the caller. */
static void THNN_(SpatialFractionalMaxPooling_updateGradInput_frame)(
  real* gradInput,
  real* gradOutput,
  THIndex_t* indices,
  int64_t numPlanes,
  int64_t inputW, int64_t inputH,
  int64_t outputW, int64_t outputH) {
  int64_t plane;
#pragma omp parallel for private(plane)
  for (plane = 0; plane < numPlanes; plane++) {
    real* gradInputForPlane = gradInput + plane * inputW * inputH;
    real* gradOutputForPlane = gradOutput + plane * outputW * outputH;
    THIndex_t* indicesForPlane = indices + plane * outputW * outputH;

    int64_t h, w;
    for (h = 0; h < outputH; ++h) {
      for (w = 0; w < outputW; ++w) {
        int64_t outputIndex = h * outputW + w;
        int64_t index = indicesForPlane[outputIndex] - TH_INDEX_BASE;
        THAssert(index >= 0 && index < inputW * inputH);

        gradInputForPlane[index] += gradOutputForPlane[outputIndex];
      }
    }
  }
}

/* Backward pass. Validates gradOutput's spatial sizes against the expected
 * output sizes, zero-initializes gradInput to input's shape, then routes the
 * gradients through the saved argmax indices (batch elements in parallel). */
void THNN_(SpatialFractionalMaxPooling_updateGradInput)(
    THNNState *state,
    THTensor *input,
    THTensor *gradOutput,
    THTensor *gradInput,
    int outputW, int outputH,
    int poolSizeW, int poolSizeH,
    THIndexTensor *indices) {

  int64_t numBatch = 1;
  int planeDim = 0;
  int heightDim = 1;
  int widthDim = 2;

  int64_t numInputDims = THTensor_(nDimensionLegacyNoScalars)(input);
  if (numInputDims == 4) {
    numBatch = THTensor_(size)(input, 0);
    planeDim = 1;
    heightDim++;
    widthDim++;
  }

  /* sizes */
  int64_t numPlanes = THTensor_(size)(input, planeDim);
  int64_t inputH = THTensor_(size)(input, heightDim);
  int64_t inputW = THTensor_(size)(input, widthDim);

  THArgCheck(outputW == THTensor_(size)(gradOutput, widthDim), 3,
             "gradOutput width unexpected");
  THArgCheck(outputH == THTensor_(size)(gradOutput, heightDim), 3,
             "gradOutput height unexpected");

  /* get contiguous gradOutput */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  /* backprop */
  if (numInputDims == 3) {
    THNN_(SpatialFractionalMaxPooling_updateGradInput_frame)(
      THTensor_(data)(gradInput),
      THTensor_(data)(gradOutput),
      THIndexTensor_(data)(indices),
      numPlanes, inputW, inputH, outputW, outputH);
  } else {
    int64_t batch;
#pragma omp parallel for private(batch)
    for (batch = 0; batch < numBatch; ++batch) {
      THNN_(SpatialFractionalMaxPooling_updateGradInput_frame)(
        THTensor_(data)(gradInput) + batch * numPlanes * inputH * inputW,
        THTensor_(data)(gradOutput) + batch * numPlanes * outputH * outputW,
        THIndexTensor_(data)(indices) + batch * numPlanes * outputH * outputW,
        numPlanes, inputW, inputH, outputW, outputH);
    }
  }

  /* cleanup */
  THTensor_(free)(gradOutput);
}

#endif
symmetric_dirichlet_objective.h
#pragma once
#ifndef OPTIMIZATION_LIB_SYMMETRIC_DIRICHLET_OBJECTIVE_H
#define OPTIMIZATION_LIB_SYMMETRIC_DIRICHLET_OBJECTIVE_H

// STL includes
#include <vector>

// Eigen includes
#include <Eigen/Core>
#include <Eigen/Sparse>

// LIBIGL includes
#include <igl/doublearea.h>

// Optimization lib includes
#include "../core/core.h"
#include "../data_providers/plain_data_provider.h"
#include "./dense_objective_function.h"

// Symmetric Dirichlet distortion energy for mesh parameterization.
// Per face f with 2x2 Jacobian J_f = [a b; c d] of the domain->UV map:
//   E_f = ||J_f||^2 + ||J_f^-1||^2 = ||J_f||^2 + ||J_f||^2 / det(J_f)^2
// The class supplies value, gradient (via the signed SVD of J_f) and a
// per-face 6x6 Hessian assembled from a convex-concave decomposition.
template<Eigen::StorageOptions StorageOrder_>
class SymmetricDirichlet : public DenseObjectiveFunction<StorageOrder_>
{
public:
  /**
   * Constructors and destructor
   */
  SymmetricDirichlet(const std::shared_ptr<MeshDataProvider>& mesh_data_provider, const std::shared_ptr<EmptyDataProvider>& empty_data_provider) :
    DenseObjectiveFunction(mesh_data_provider, empty_data_provider, "Symmetric Dirichlet", 0, false)
  {
    this->Initialize();
  }

  virtual ~SymmetricDirichlet()
  {
  }

private:
  /**
   * Overrides
   */
  // f = 0.5 * sum_f Area(f) * E_f. Also caches the per-face energy in Efi,
  // which the optional 'bound' barrier term in CalculateGradient reads.
  void CalculateValue(double& f) override
  {
    bool inversions_exist = UpdateJ(X); // NOTE(review): return value (any det < 0) is unused here

    // E = ||J||^2 + ||J^-1||^2 = ||J||^2 + ||J||^2 / det(J)^2
    Eigen::VectorXd dirichlet = a.cwiseAbs2() + b.cwiseAbs2() + c.cwiseAbs2() + d.cwiseAbs2();
    Eigen::VectorXd invDirichlet = dirichlet.cwiseQuotient(detJuv.cwiseAbs2());
    Efi = dirichlet + invDirichlet;
    f = 0.5 * (Area.asDiagonal() * Efi).sum();
  }

  // Gradient via the singular values (S, s) of J: dE/dS = S - S^-3 per value,
  // chained through the dense singular-value derivatives Dsd and scattered
  // into g with the 6-rows-per-face index table Fuv.
  void CalculateGradient(Eigen::VectorXd& g) override
  {
    bool inversions_exist = UpdateJ(X); // NOTE(review): return value is unused here
    UpdateSSVDFunction();
    Eigen::MatrixX2d invs = s.cwiseInverse();
    g.conservativeResize(X.size());
    g.setZero();
    ComputeDenseSSVDDerivatives();
    for (int fi = 0; fi < numF; ++fi)
    {
      double gS = s(fi, 0) - pow(invs(fi, 0), 3);
      double gs = s(fi, 1) - pow(invs(fi, 1), 3);
      // Optional log-barrier style term against the energy bound
      // (inactive with the default bound == 0).
      if (bound > 0)
      {
        gS += gS / (bound - Efi(fi));
        gs += gs / (bound - Efi(fi));
      }
      Eigen::Matrix<double, 6, 1> Dsdi0 = Dsd[0].col(fi);
      Eigen::Matrix<double, 6, 1> Dsdi1 = Dsd[1].col(fi);
      Eigen::Matrix<double, 6, 1> gi = Area(fi) * (Dsdi0 * gS + Dsdi1 * gs);
      for (int vi = 0; vi < 6; ++vi)
      {
        g(Fuv(vi, fi)) += gi(vi);
      }
    }
  }

  // Reinterpret the stacked solution vector x = [u; v] as an (n/2) x 2 matrix
  // (x.rows() >> 1 == x.rows() / 2 — x is assumed to have even length).
  void PreUpdate(const Eigen::VectorXd& x) override
  {
    X = Eigen::Map<const Eigen::MatrixX2d>(x.data(), x.rows() >> 1, 2);
  }

  // One-time allocation of all per-face buffers and precomputation of the
  // constant derivative/cone matrices used by value/gradient/Hessian.
  void PreInitialize() override
  {
    auto F = this->mesh_data_provider_->GetDomainFaces();
    auto V = this->mesh_data_provider_->GetDomainVertices();
    auto D1 = this->mesh_data_provider_->GetD1();
    auto D2 = this->mesh_data_provider_->GetD2();
    auto Fs = this->mesh_data_provider_->GetImageFaces();
    this->F = this->mesh_data_provider_->GetImageFaces();
    numF = Fs.rows();
    numV = this->mesh_data_provider_->GetImageVerticesCount();

    // Fuv stacks each face's three u-indices over its three v-indices
    // (v-indices are offset by numV, matching the [u; v] layout of x).
    Fuv.resize(6, numF);
    Fuv.topRows(3) = Fs.transpose();
    Fuv.bottomRows(3) = Fuv.topRows(3) + Eigen::MatrixXi::Constant(3, numF, static_cast<int>(numV));
    a.resize(numF);
    b.resize(numF);
    c.resize(numF);
    d.resize(numF);
    s.resize(numF, 2);
    v.resize(numF, 4);
    u.resize(numF, 4);
    Dsd[0].resize(6, numF);
    Dsd[1].resize(6, numF);

    //Parameterization J mats resize
    detJuv.resize(numF);
    invdetJuv.resize(numF);
    DdetJuv_DUV.resize(static_cast<int>(numF), static_cast<int>(numV * 2));

    // compute init energy matrices
    igl::doublearea(V, F, Area);
    Area /= 2;

    D1d = D1.transpose();
    D2d = D2.transpose();

    //columns belong to different faces
    a1d.resize(6, numF);
    a2d.resize(6, numF);
    b1d.resize(6, numF);
    b2d.resize(6, numF);

    a1d.topRows(3) = 0.5 * D1d;
    a1d.bottomRows(3) = 0.5 * D2d;
    a2d.topRows(3) = -0.5 * D2d;
    a2d.bottomRows(3) = 0.5 * D1d;
    b1d.topRows(3) = 0.5 * D1d;
    b1d.bottomRows(3) = -0.5 * D2d;
    b2d.topRows(3) = 0.5 * D2d;
    b2d.bottomRows(3) = 0.5 * D1d;

    Hi.resize(numF);
  }

  // Seeds the sparsity pattern with 21 zero-valued triplets per face — the
  // upper triangle of the 6x6 local Hessian, pushed in column order.
  // CalculateRawTriplets later overwrites these values in the same order.
  void InitializeTriplets(std::vector<Eigen::Triplet<double>>& triplets) override
  {
    auto nfs = this->mesh_data_provider_->GetImageFaces().rows();
    auto nvs = this->mesh_data_provider_->GetImageVerticesCount();
    triplets.reserve(21 * nfs);
    for (int i = 0; i < nfs; ++i)
    {
      // for every face there is a 6x6 local hessian
      // we only need the 21 values contained in the upper
      // diagonal. they are accessed and also put into the
      // big hessian in column order.

      // base indices
      int uhbr = 3 * i; //upper_half_base_row
      int lhbr = 3 * i + static_cast<int>(nvs); // lower_half_base_row
      int lhbc = 3 * i; // left_half_base_col
      int rhbc = 3 * i + static_cast<int>(nvs); // right_half_base_col

      // first column
      triplets.push_back(Eigen::Triplet<double>(uhbr, lhbc, 0));

      // second column
      triplets.push_back(Eigen::Triplet<double>(uhbr, lhbc + 1, 0));
      triplets.push_back(Eigen::Triplet<double>(uhbr + 1, lhbc + 1, 0));

      // third column
      triplets.push_back(Eigen::Triplet<double>(uhbr, lhbc + 2, 0));
      triplets.push_back(Eigen::Triplet<double>(uhbr + 1, lhbc + 2, 0));
      triplets.push_back(Eigen::Triplet<double>(uhbr + 2, lhbc + 2, 0));

      // fourth column
      triplets.push_back(Eigen::Triplet<double>(uhbr, rhbc, 0));
      triplets.push_back(Eigen::Triplet<double>(uhbr + 1, rhbc, 0));
      triplets.push_back(Eigen::Triplet<double>(uhbr + 2, rhbc, 0));
      triplets.push_back(Eigen::Triplet<double>(lhbr, rhbc, 0));

      // fifth column
      triplets.push_back(Eigen::Triplet<double>(uhbr, rhbc + 1, 0));
      triplets.push_back(Eigen::Triplet<double>(uhbr + 1, rhbc + 1, 0));
      triplets.push_back(Eigen::Triplet<double>(uhbr + 2, rhbc + 1, 0));
      triplets.push_back(Eigen::Triplet<double>(lhbr, rhbc + 1, 0));
      triplets.push_back(Eigen::Triplet<double>(lhbr + 1, rhbc + 1, 0));

      // sixth column
      triplets.push_back(Eigen::Triplet<double>(uhbr, rhbc + 2, 0));
      triplets.push_back(Eigen::Triplet<double>(uhbr + 1, rhbc + 2, 0));
      triplets.push_back(Eigen::Triplet<double>(uhbr + 2, rhbc + 2, 0));
      triplets.push_back(Eigen::Triplet<double>(lhbr, rhbc + 2, 0));
      triplets.push_back(Eigen::Triplet<double>(lhbr + 1, rhbc + 2, 0));
      triplets.push_back(Eigen::Triplet<double>(lhbr + 2, rhbc + 2, 0));
    }
  }

  // Fills the triplet values with the per-face 6x6 Hessians (upper triangle,
  // column order — must match InitializeTriplets), then adds a small 1e-6
  // diagonal regularizer. Writes go through const_cast on Triplet::value(),
  // relying on the triplet layout created above.
  void CalculateRawTriplets(std::vector<Eigen::Triplet<double>>& triplets) override
  {
    auto lambda1 = [](double a) {return a - 1.0 / (a * a * a); };

    // gradient of outer function in composition
    Eigen::VectorXd gradfS = s.col(0).unaryExpr(lambda1);
    Eigen::VectorXd gradfs = s.col(1).unaryExpr(lambda1);
    auto lambda2 = [](double a) {return 1 + 3 / (a * a * a * a); };

    // hessian of outer function in composition (diagonal)
    Eigen::VectorXd HS = s.col(0).unaryExpr(lambda2);
    Eigen::VectorXd Hs = s.col(1).unaryExpr(lambda2);

    // similarity alpha
    Eigen::VectorXd aY = 0.5 * (a + d);
    Eigen::VectorXd bY = 0.5 * (c - b);

    // anti similarity beta
    Eigen::VectorXd cY = 0.5 * (a - d);
    Eigen::VectorXd dY = 0.5 * (b + c);

    #pragma omp parallel for
    for (int i = 0; i < numF; ++i)
    {
      //vectors of size 6
      //svd derivatives
      Eigen::Matrix<double, 6, 1> dSi = Dsd[0].col(i);
      Eigen::Matrix<double, 6, 1> dsi = Dsd[1].col(i);

      //cones constant coefficients (cone = |Ax|, A is a coefficient)
      Eigen::Matrix<double, 6, 1> a1i = a1d.col(i);
      Eigen::Matrix<double, 6, 1> a2i = a2d.col(i);
      Eigen::Matrix<double, 6, 1> b1i = b1d.col(i);
      Eigen::Matrix<double, 6, 1> b2i = b2d.col(i);
      Hi[i] = Area(i) * ComputeConvexConcaveFaceHessian(
        a1i, a2i, b1i, b2i,
        aY(i), bY(i), cY(i), dY(i),
        dSi, dsi,
        gradfS(i), gradfs(i),
        HS(i), Hs(i));

      int index2 = i * 21;
      for (int a = 0; a < 6; ++a)
      {
        for (int b = 0; b <= a; ++b)
        {
          const_cast<double&>(triplets[index2++].value()) = Hi[i](a, b);
        }
      }
    }

    std::size_t nf = F.rows();
    // NOTE(review): 'long i' compared against std::size_t nf — signed/unsigned
    // mismatch; also MSVC's OpenMP requires a signed loop index. Confirm intent.
    #pragma omp parallel for
    for (long i = 0; i < nf; i++)
    {
      // Diagonal entries of the local 6x6 block sit at offsets
      // 0, 2, 5, 9, 14, 20 within the 21 column-ordered triplets.
      std::size_t base = 21 * i;
      const_cast<double&>(triplets[base].value()) += 1e-6;
      const_cast<double&>(triplets[base + 2].value()) += 1e-6;
      const_cast<double&>(triplets[base + 5].value()) += 1e-6;
      const_cast<double&>(triplets[base + 9].value()) += 1e-6;
      const_cast<double&>(triplets[base + 14].value()) += 1e-6;
      const_cast<double&>(triplets[base + 20].value()) += 1e-6;
    }
  }

  // Quake-style fast inverse square root (approximation of 1/sqrt(number)).
  // NOTE(review): union type punning is UB in strict C++ (well-defined in C);
  // std::bit_cast / memcpy would be the conforming alternative.
  float Q_rsqrt(float number)
  {
    const float x2 = number * 0.5F;
    const float threehalfs = 1.5F;

    union {
      float f;
      uint32_t i;
    } conv = { number }; // member 'f' set to value of 'number'.
    conv.i = 0x5f3759df - (conv.i >> 1);          // magic-constant bit hack
    conv.f *= (threehalfs - (x2 * conv.f * conv.f)); // one Newton iteration
    return conv.f;
  }

  /**
   * Methods
   */
  // Hessian of the cone function g(x) = |A x| (A stacked from rows A1, A2)
  // evaluated where A1 x = a1x and A2 x = a2x.
  Eigen::Matrix<double, 6, 6> ComputeFaceConeHessian(
    const Eigen::Matrix<double, 6, 1>& A1,
    const Eigen::Matrix<double, 6, 1>& A2,
    double a1x,
    double a2x)
  {
    double f2 = a1x * a1x + a2x * a2x;
    double invf = 1.0 / sqrt(f2); // NOTE(review): no guard against f2 == 0
    double invf3 = invf * invf * invf;

    Eigen::Matrix<double, 6, 6> A1A1t = A1 * A1.transpose();
    Eigen::Matrix<double, 6, 6> A2A2t = A2 * A2.transpose();
    Eigen::Matrix<double, 6, 6> A1A2t = A1 * A2.transpose();
    Eigen::Matrix<double, 6, 6> A2A1t = A1A2t.transpose();

    double a2 = a1x * a1x;
    double b2 = a2x * a2x;
    double ab = a1x * a2x;

    return (invf - invf3 * a2) * A1A1t + (invf - invf3 * b2) * A2A2t - invf3 * ab * (A1A2t + A2A1t);
  }

  // Convexified per-face Hessian: generalized Gauss-Newton term plus the
  // (conditionally added) cone Hessians weighted by walpha/wbeta.
  Eigen::Matrix<double, 6, 6> ComputeConvexConcaveFaceHessian(
    const Eigen::Matrix<double, 6, 1>& a1,
    const Eigen::Matrix<double, 6, 1>& a2,
    const Eigen::Matrix<double, 6, 1>& b1,
    const Eigen::Matrix<double, 6, 1>& b2,
    double aY,
    double bY,
    double cY,
    double dY,
    const Eigen::Matrix<double, 6, 1>& dSi,
    const Eigen::Matrix<double, 6, 1>& dsi,
    double gradfS,
    double gradfs,
    double HS,
    double Hs)
  {
    // no multiplying by area in this function

    // generalized gauss newton
    Eigen::Matrix<double, 6, 6> H = HS * dSi * dSi.transpose() + Hs * dsi * dsi.transpose();

    double walpha = gradfS + gradfs;
    if (walpha > 0)
    {
      H += walpha * ComputeFaceConeHessian(a1, a2, aY, bY);
    }

    double wbeta = gradfS - gradfs;
    if (wbeta > 1e-7)
    {
      H += wbeta * ComputeFaceConeHessian(b1, b2, cY, dY);
    }

    return H;
  }

  // Recomputes the per-face Jacobian entries (a, b, c, d) and detJuv from the
  // current UVs. Returns true if any face is inverted (det < 0).
  // assumes x's two columns each hold 3 coordinates per face when remapped
  // as 3 x numF below — TODO(review) confirm against the data provider layout.
  bool UpdateJ(const Eigen::MatrixX2d& x)
  {
    Eigen::Map<const Eigen::Matrix3Xd> X1(x.data(), 3, F.rows());
    Eigen::Map<const Eigen::Matrix3Xd> X2(x.col(1).data(), 3, F.rows());
    a = D1d.cwiseProduct(X1).colwise().sum();
    b = D2d.cwiseProduct(X1).colwise().sum();
    c = D1d.cwiseProduct(X2).colwise().sum();
    d = D2d.cwiseProduct(X2).colwise().sum();
    detJuv = a.cwiseProduct(d) - b.cwiseProduct(c);
    return ((detJuv.array() < 0).any());
  }

  // Signed SVD of every 2x2 Jacobian: fills u, v (flattened 2x2 factors)
  // and s (singular values, s(i,0) >= s(i,1)).
  void UpdateSSVDFunction()
  {
    #pragma omp parallel for
    for (int i = 0; i < a.size(); i++)
    {
      Eigen::Matrix2d A;
      Eigen::Matrix2d U, S, V;
      A << a[i], b[i], c[i], d[i];
      Utils::SSVD2x2(A, U, S, V);
      u.row(i) << U(0), U(1), U(2), U(3);
      v.row(i) << V(0), V(1), V(2), V(3);
      s.row(i) << S(0), S(3);
    }
  }

  // Derivatives of the two singular values w.r.t. the 6 face DOFs,
  // stored column-per-face in Dsd[0] (major) and Dsd[1] (minor).
  void ComputeDenseSSVDDerivatives()
  {
    // different columns belong to different faces
    Eigen::MatrixXd B(D1d * v.col(0).asDiagonal() + D2d * v.col(1).asDiagonal());
    Eigen::MatrixXd C(D1d * v.col(2).asDiagonal() + D2d * v.col(3).asDiagonal());

    Eigen::MatrixXd t1 = B * u.col(0).asDiagonal();
    Eigen::MatrixXd t2 = B * u.col(1).asDiagonal();
    Dsd[0].topRows(t1.rows()) = t1;
    Dsd[0].bottomRows(t1.rows()) = t2;
    t1 = C * u.col(2).asDiagonal();
    t2 = C * u.col(3).asDiagonal();
    Dsd[1].topRows(t1.rows()) = t1;
    Dsd[1].bottomRows(t1.rows()) = t2;
  }

  /**
   * Private fields
   */
  Eigen::MatrixX2d X;   // current UVs, one row per image vertex
  double bound=0;       // optional energy bound for the barrier term (0 = off)
  Eigen::MatrixX3i F;
  Eigen::MatrixX2d V;

  Eigen::DenseIndex numV;
  Eigen::DenseIndex numE;
  Eigen::DenseIndex numS;
  Eigen::DenseIndex numF;

  // Jacobian of the parameterization per face
  Eigen::VectorXd a;
  Eigen::VectorXd b;
  Eigen::VectorXd c;
  Eigen::VectorXd d;
  Eigen::VectorXd detJuv; // (ad - bc)
  Eigen::VectorXd invdetJuv; // 1 / (ad - bc)
  Eigen::SparseMatrix<double> DdetJuv_DUV; // Jacobian of the function (detJuv) by UV

  // Singular values
  Eigen::MatrixX2d s; // Singular values s[0]>s[1]
  Eigen::MatrixX4d v; // Singular vectors
  Eigen::MatrixX4d u; // Singular vectors
  Eigen::MatrixXd Dsd[2]; // Singular values dense derivatives s[0]>s[1]

  // Efi = sum(Ef_dist.^2, 2), for data->Efi history
  Eigen::VectorXd Efi;

  // F of cut mesh for u and v indices 6XnumF
  Eigen::MatrixXi Fuv;
  Eigen::VectorXd Area;

  // Dense mesh derivative matrices
  Eigen::Matrix3Xd D1d, D2d;

  // Constant matrices for cones calculation
  Eigen::SparseMatrix<double> a1, a1t, a2, a2t, b1, b1t, b2, b2t;

  // Dense constant matrices for cones calculation
  Eigen::MatrixXd a1d, a2d, b1d, b2d;

  // Per face Hessians vector
  std::vector<Eigen::Matrix<double,6,6>> Hi;
};

#endif
implicit_blender.c
/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * The Original Code is Copyright (C) Blender Foundation * All rights reserved. */ /** \file * \ingroup bph */ #include "implicit.h" #ifdef IMPLICIT_SOLVER_BLENDER # include "MEM_guardedalloc.h" # include "DNA_scene_types.h" # include "DNA_object_types.h" # include "DNA_object_force_types.h" # include "DNA_meshdata_types.h" # include "DNA_texture_types.h" # include "BLI_math.h" # include "BLI_utildefines.h" # include "BKE_cloth.h" # include "BKE_collision.h" # include "BKE_effect.h" # include "BPH_mass_spring.h" # ifdef __GNUC__ # pragma GCC diagnostic ignored "-Wtype-limits" # endif # ifdef _OPENMP # define CLOTH_OPENMP_LIMIT 512 # endif //#define DEBUG_TIME # ifdef DEBUG_TIME # include "PIL_time.h" # endif static float I[3][3] = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}}; static float ZERO[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}}; # if 0 # define C99 # ifdef C99 # defineDO_INLINE inline # else # defineDO_INLINE static # endif # endif /* if 0 */ struct Cloth; ////////////////////////////////////////// /* fast vector / matrix library, enhancements are welcome :) -dg */ ///////////////////////////////////////// /* DEFINITIONS */ typedef float lfVector[3]; typedef struct fmatrix3x3 { float m[3][3]; /* 3x3 matrix */ unsigned int c, r; /* column and row number */ /* int pinned; // 
is this vertex allowed to move? */ float n1, n2, n3; /* three normal vectors for collision constrains */ unsigned int vcount; /* vertex count */ unsigned int scount; /* spring count */ } fmatrix3x3; /////////////////////////// // float[3] vector /////////////////////////// /* simple vector code */ /* STATUS: verified */ DO_INLINE void mul_fvector_S(float to[3], float from[3], float scalar) { to[0] = from[0] * scalar; to[1] = from[1] * scalar; to[2] = from[2] * scalar; } /* simple v^T * v product ("outer product") */ /* STATUS: HAS TO BE verified (*should* work) */ DO_INLINE void mul_fvectorT_fvector(float to[3][3], float vectorA[3], float vectorB[3]) { mul_fvector_S(to[0], vectorB, vectorA[0]); mul_fvector_S(to[1], vectorB, vectorA[1]); mul_fvector_S(to[2], vectorB, vectorA[2]); } /* simple v^T * v product with scalar ("outer product") */ /* STATUS: HAS TO BE verified (*should* work) */ DO_INLINE void mul_fvectorT_fvectorS(float to[3][3], float vectorA[3], float vectorB[3], float aS) { mul_fvectorT_fvector(to, vectorA, vectorB); mul_fvector_S(to[0], to[0], aS); mul_fvector_S(to[1], to[1], aS); mul_fvector_S(to[2], to[2], aS); } # if 0 /* printf vector[3] on console: for debug output */ static void print_fvector(float m3[3]) { printf("%f\n%f\n%f\n\n", m3[0], m3[1], m3[2]); } /////////////////////////// // long float vector float (*)[3] /////////////////////////// /* print long vector on console: for debug output */ DO_INLINE void print_lfvector(float (*fLongVector)[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { print_fvector(fLongVector[i]); } } # endif /* create long vector */ DO_INLINE lfVector *create_lfvector(unsigned int verts) { /* TODO: check if memory allocation was successful */ return (lfVector *)MEM_callocN(verts * sizeof(lfVector), "cloth_implicit_alloc_vector"); // return (lfVector *)cloth_aligned_malloc(&MEMORY_BASE, verts * sizeof(lfVector)); } /* delete long vector */ DO_INLINE void del_lfvector(float (*fLongVector)[3]) { 
if (fLongVector != NULL) { MEM_freeN(fLongVector); // cloth_aligned_free(&MEMORY_BASE, fLongVector); } } /* copy long vector */ DO_INLINE void cp_lfvector(float (*to)[3], float (*from)[3], unsigned int verts) { memcpy(to, from, verts * sizeof(lfVector)); } /* init long vector with float[3] */ DO_INLINE void init_lfvector(float (*fLongVector)[3], float vector[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { copy_v3_v3(fLongVector[i], vector); } } /* zero long vector with float[3] */ DO_INLINE void zero_lfvector(float (*to)[3], unsigned int verts) { memset(to, 0.0f, verts * sizeof(lfVector)); } /* multiply long vector with scalar*/ DO_INLINE void mul_lfvectorS(float (*to)[3], float (*fLongVector)[3], float scalar, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { mul_fvector_S(to[i], fLongVector[i], scalar); } } /* multiply long vector with scalar*/ /* A -= B * float */ DO_INLINE void submul_lfvectorS(float (*to)[3], float (*fLongVector)[3], float scalar, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECSUBMUL(to[i], fLongVector[i], scalar); } } /* dot product for big vector */ DO_INLINE float dot_lfvector(float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { long i = 0; float temp = 0.0; // XXX brecht, disabled this for now (first schedule line was already disabled), // due to non-commutative nature of floating point ops this makes the sim give // different results each time you run it! 
// schedule(guided, 2) //#pragma omp parallel for reduction(+: temp) if (verts > CLOTH_OPENMP_LIMIT) for (i = 0; i < (long)verts; i++) { temp += dot_v3v3(fLongVectorA[i], fLongVectorB[i]); } return temp; } /* A = B + C --> for big vector */ DO_INLINE void add_lfvector_lfvector(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { add_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]); } } /* A = B + C * float --> for big vector */ DO_INLINE void add_lfvector_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECADDS(to[i], fLongVectorA[i], fLongVectorB[i], bS); } } /* A = B * float + C * float --> for big vector */ DO_INLINE void add_lfvectorS_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float aS, float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECADDSS(to[i], fLongVectorA[i], aS, fLongVectorB[i], bS); } } /* A = B - C * float --> for big vector */ DO_INLINE void sub_lfvector_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECSUBS(to[i], fLongVectorA[i], fLongVectorB[i], bS); } } /* A = B - C --> for big vector */ DO_INLINE void sub_lfvector_lfvector(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { sub_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]); } } /////////////////////////// // 3x3 matrix /////////////////////////// # if 0 /* printf 3x3 matrix on console: for debug output */ static void print_fmatrix(float m3[3][3]) { printf("%f\t%f\t%f\n", m3[0][0], m3[0][1], m3[0][2]); printf("%f\t%f\t%f\n", m3[1][0], m3[1][1], m3[1][2]); printf("%f\t%f\t%f\n\n", m3[2][0], m3[2][1], m3[2][2]); } static void 
print_sparse_matrix(fmatrix3x3 *m) { if (m) { unsigned int i; for (i = 0; i < m[0].vcount + m[0].scount; i++) { printf("%d:\n", i); print_fmatrix(m[i].m); } } } # endif # if 0 static void print_lvector(lfVector *v, int numverts) { int i; for (i = 0; i < numverts; ++i) { if (i > 0) { printf("\n"); } printf("%f,\n", v[i][0]); printf("%f,\n", v[i][1]); printf("%f,\n", v[i][2]); } } # endif # if 0 static void print_bfmatrix(fmatrix3x3 *m) { int tot = m[0].vcount + m[0].scount; int size = m[0].vcount * 3; float *t = MEM_callocN(sizeof(float) * size * size, "bfmatrix"); int q, i, j; for (q = 0; q < tot; ++q) { int k = 3 * m[q].r; int l = 3 * m[q].c; for (j = 0; j < 3; ++j) { for (i = 0; i < 3; ++i) { // if (t[k + i + (l + j) * size] != 0.0f) { // printf("warning: overwriting value at %d, %d\n", m[q].r, m[q].c); // } if (k == l) { t[k + i + (k + j) * size] += m[q].m[i][j]; } else { t[k + i + (l + j) * size] += m[q].m[i][j]; t[l + j + (k + i) * size] += m[q].m[j][i]; } } } } for (j = 0; j < size; ++j) { if (j > 0 && j % 3 == 0) { printf("\n"); } for (i = 0; i < size; ++i) { if (i > 0 && i % 3 == 0) { printf(" "); } implicit_print_matrix_elem(t[i + j * size]); } printf("\n"); } MEM_freeN(t); } # endif /* copy 3x3 matrix */ DO_INLINE void cp_fmatrix(float to[3][3], float from[3][3]) { // memcpy(to, from, sizeof (float) * 9); copy_v3_v3(to[0], from[0]); copy_v3_v3(to[1], from[1]); copy_v3_v3(to[2], from[2]); } /* copy 3x3 matrix */ DO_INLINE void initdiag_fmatrixS(float to[3][3], float aS) { cp_fmatrix(to, ZERO); to[0][0] = aS; to[1][1] = aS; to[2][2] = aS; } # if 0 /* calculate determinant of 3x3 matrix */ DO_INLINE float det_fmatrix(float m[3][3]) { return m[0][0] * m[1][1] * m[2][2] + m[1][0] * m[2][1] * m[0][2] + m[0][1] * m[1][2] * m[2][0] - m[0][0] * m[1][2] * m[2][1] - m[0][1] * m[1][0] * m[2][2] - m[2][0] * m[1][1] * m[0][2]; } DO_INLINE void inverse_fmatrix(float to[3][3], float from[3][3]) { unsigned int i, j; float d; if ((d = det_fmatrix(from)) == 0) { 
printf("can't build inverse"); exit(0); } for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { int i1 = (i + 1) % 3; int i2 = (i + 2) % 3; int j1 = (j + 1) % 3; int j2 = (j + 2) % 3; /** Reverse indexes i&j to take transpose. */ to[j][i] = (from[i1][j1] * from[i2][j2] - from[i1][j2] * from[i2][j1]) / d; /** * <pre> * if (i == j) { * to[i][j] = 1.0f / from[i][j]; * } * else { * to[i][j] = 0; * } * </pre> */ } } } # endif /* 3x3 matrix multiplied by a scalar */ /* STATUS: verified */ DO_INLINE void mul_fmatrix_S(float matrix[3][3], float scalar) { mul_fvector_S(matrix[0], matrix[0], scalar); mul_fvector_S(matrix[1], matrix[1], scalar); mul_fvector_S(matrix[2], matrix[2], scalar); } /* a vector multiplied by a 3x3 matrix */ /* STATUS: verified */ DO_INLINE void mul_fvector_fmatrix(float *to, float *from, float matrix[3][3]) { to[0] = matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2]; to[1] = matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2]; to[2] = matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2]; } /* 3x3 matrix multiplied by a vector */ /* STATUS: verified */ DO_INLINE void mul_fmatrix_fvector(float *to, float matrix[3][3], float from[3]) { to[0] = dot_v3v3(matrix[0], from); to[1] = dot_v3v3(matrix[1], from); to[2] = dot_v3v3(matrix[2], from); } /* 3x3 matrix addition with 3x3 matrix */ DO_INLINE void add_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3]) { add_v3_v3v3(to[0], matrixA[0], matrixB[0]); add_v3_v3v3(to[1], matrixA[1], matrixB[1]); add_v3_v3v3(to[2], matrixA[2], matrixB[2]); } /* A -= B*x + C*y (3x3 matrix sub-addition with 3x3 matrix) */ DO_INLINE void subadd_fmatrixS_fmatrixS( float to[3][3], float matrixA[3][3], float aS, float matrixB[3][3], float bS) { VECSUBADDSS(to[0], matrixA[0], aS, matrixB[0], bS); VECSUBADDSS(to[1], matrixA[1], aS, matrixB[1], bS); VECSUBADDSS(to[2], matrixA[2], aS, matrixB[2], bS); } /* A = B - C (3x3 matrix subtraction with 3x3 
matrix) */ DO_INLINE void sub_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3]) { sub_v3_v3v3(to[0], matrixA[0], matrixB[0]); sub_v3_v3v3(to[1], matrixA[1], matrixB[1]); sub_v3_v3v3(to[2], matrixA[2], matrixB[2]); } ///////////////////////////////////////////////////////////////// // special functions ///////////////////////////////////////////////////////////////// /* 3x3 matrix multiplied+added by a vector */ /* STATUS: verified */ DO_INLINE void muladd_fmatrix_fvector(float to[3], float matrix[3][3], float from[3]) { to[0] += dot_v3v3(matrix[0], from); to[1] += dot_v3v3(matrix[1], from); to[2] += dot_v3v3(matrix[2], from); } DO_INLINE void muladd_fmatrixT_fvector(float to[3], float matrix[3][3], float from[3]) { to[0] += matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2]; to[1] += matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2]; to[2] += matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2]; } BLI_INLINE void outerproduct(float r[3][3], const float a[3], const float b[3]) { mul_v3_v3fl(r[0], a, b[0]); mul_v3_v3fl(r[1], a, b[1]); mul_v3_v3fl(r[2], a, b[2]); } BLI_INLINE void cross_m3_v3m3(float r[3][3], const float v[3], float m[3][3]) { cross_v3_v3v3(r[0], v, m[0]); cross_v3_v3v3(r[1], v, m[1]); cross_v3_v3v3(r[2], v, m[2]); } BLI_INLINE void cross_v3_identity(float r[3][3], const float v[3]) { r[0][0] = 0.0f; r[1][0] = v[2]; r[2][0] = -v[1]; r[0][1] = -v[2]; r[1][1] = 0.0f; r[2][1] = v[0]; r[0][2] = v[1]; r[1][2] = -v[0]; r[2][2] = 0.0f; } BLI_INLINE void madd_m3_m3fl(float r[3][3], float m[3][3], float f) { r[0][0] += m[0][0] * f; r[0][1] += m[0][1] * f; r[0][2] += m[0][2] * f; r[1][0] += m[1][0] * f; r[1][1] += m[1][1] * f; r[1][2] += m[1][2] * f; r[2][0] += m[2][0] * f; r[2][1] += m[2][1] * f; r[2][2] += m[2][2] * f; } ///////////////////////////////////////////////////////////////// /////////////////////////// // SPARSE SYMMETRIC big matrix with 3x3 matrix entries 
/////////////////////////// /* printf a big matrix on console: for debug output */ # if 0 static void print_bfmatrix(fmatrix3x3 *m3) { unsigned int i = 0; for (i = 0; i < m3[0].vcount + m3[0].scount; i++) { print_fmatrix(m3[i].m); } } # endif BLI_INLINE void init_fmatrix(fmatrix3x3 *matrix, int r, int c) { matrix->r = r; matrix->c = c; } /* create big matrix */ DO_INLINE fmatrix3x3 *create_bfmatrix(unsigned int verts, unsigned int springs) { // TODO: check if memory allocation was successful */ fmatrix3x3 *temp = (fmatrix3x3 *)MEM_callocN(sizeof(fmatrix3x3) * (verts + springs), "cloth_implicit_alloc_matrix"); int i; temp[0].vcount = verts; temp[0].scount = springs; /* vertex part of the matrix is diagonal blocks */ for (i = 0; i < verts; ++i) { init_fmatrix(temp + i, i, i); } return temp; } /* delete big matrix */ DO_INLINE void del_bfmatrix(fmatrix3x3 *matrix) { if (matrix != NULL) { MEM_freeN(matrix); } } /* copy big matrix */ DO_INLINE void cp_bfmatrix(fmatrix3x3 *to, fmatrix3x3 *from) { // TODO bounds checking memcpy(to, from, sizeof(fmatrix3x3) * (from[0].vcount + from[0].scount)); } /* init big matrix */ // slow in parallel DO_INLINE void init_bfmatrix(fmatrix3x3 *matrix, float m3[3][3]) { unsigned int i; for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) { cp_fmatrix(matrix[i].m, m3); } } /* init the diagonal of big matrix */ // slow in parallel DO_INLINE void initdiag_bfmatrix(fmatrix3x3 *matrix, float m3[3][3]) { unsigned int i, j; float tmatrix[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}}; for (i = 0; i < matrix[0].vcount; i++) { cp_fmatrix(matrix[i].m, m3); } for (j = matrix[0].vcount; j < matrix[0].vcount + matrix[0].scount; j++) { cp_fmatrix(matrix[j].m, tmatrix); } } /* SPARSE SYMMETRIC multiply big matrix with long vector*/ /* STATUS: verified */ DO_INLINE void mul_bfmatrix_lfvector(float (*to)[3], fmatrix3x3 *from, lfVector *fLongVector) { unsigned int vcount = from[0].vcount; lfVector *temp = create_lfvector(vcount); zero_lfvector(to, vcount); 
# pragma omp parallel sections if (vcount > CLOTH_OPENMP_LIMIT) { # pragma omp section { for (unsigned int i = from[0].vcount; i < from[0].vcount + from[0].scount; i++) { /* This is the lower triangle of the sparse matrix, * therefore multiplication occurs with transposed submatrices. */ muladd_fmatrixT_fvector(to[from[i].c], from[i].m, fLongVector[from[i].r]); } } # pragma omp section { for (unsigned int i = 0; i < from[0].vcount + from[0].scount; i++) { muladd_fmatrix_fvector(temp[from[i].r], from[i].m, fLongVector[from[i].c]); } } } add_lfvector_lfvector(to, to, temp, from[0].vcount); del_lfvector(temp); } /* SPARSE SYMMETRIC sub big matrix with big matrix*/ /* A -= B * float + C * float --> for big matrix */ /* VERIFIED */ DO_INLINE void subadd_bfmatrixS_bfmatrixS( fmatrix3x3 *to, fmatrix3x3 *from, float aS, fmatrix3x3 *matrix, float bS) { unsigned int i = 0; /* process diagonal elements */ for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) { subadd_fmatrixS_fmatrixS(to[i].m, from[i].m, aS, matrix[i].m, bS); } } /////////////////////////////////////////////////////////////////// // simulator start /////////////////////////////////////////////////////////////////// typedef struct Implicit_Data { /* inputs */ fmatrix3x3 *bigI; /* identity (constant) */ fmatrix3x3 *tfm; /* local coordinate transform */ fmatrix3x3 *M; /* masses */ lfVector *F; /* forces */ fmatrix3x3 *dFdV, *dFdX; /* force jacobians */ int num_blocks; /* number of off-diagonal blocks (springs) */ /* motion state data */ lfVector *X, *Xnew; /* positions */ lfVector *V, *Vnew; /* velocities */ /* internal solver data */ lfVector *B; /* B for A*dV = B */ fmatrix3x3 *A; /* A for A*dV = B */ lfVector *dV; /* velocity change (solution of A*dV = B) */ lfVector *z; /* target velocity in constrained directions */ fmatrix3x3 *S; /* filtering matrix for constraints */ fmatrix3x3 *P, *Pinv; /* pre-conditioning matrix */ } Implicit_Data; Implicit_Data *BPH_mass_spring_solver_create(int numverts, int 
numsprings) { Implicit_Data *id = (Implicit_Data *)MEM_callocN(sizeof(Implicit_Data), "implicit vecmat"); /* process diagonal elements */ id->tfm = create_bfmatrix(numverts, 0); id->A = create_bfmatrix(numverts, numsprings); id->dFdV = create_bfmatrix(numverts, numsprings); id->dFdX = create_bfmatrix(numverts, numsprings); id->S = create_bfmatrix(numverts, 0); id->Pinv = create_bfmatrix(numverts, numsprings); id->P = create_bfmatrix(numverts, numsprings); id->bigI = create_bfmatrix(numverts, numsprings); // TODO 0 springs id->M = create_bfmatrix(numverts, numsprings); id->X = create_lfvector(numverts); id->Xnew = create_lfvector(numverts); id->V = create_lfvector(numverts); id->Vnew = create_lfvector(numverts); id->F = create_lfvector(numverts); id->B = create_lfvector(numverts); id->dV = create_lfvector(numverts); id->z = create_lfvector(numverts); initdiag_bfmatrix(id->bigI, I); return id; } void BPH_mass_spring_solver_free(Implicit_Data *id) { del_bfmatrix(id->tfm); del_bfmatrix(id->A); del_bfmatrix(id->dFdV); del_bfmatrix(id->dFdX); del_bfmatrix(id->S); del_bfmatrix(id->P); del_bfmatrix(id->Pinv); del_bfmatrix(id->bigI); del_bfmatrix(id->M); del_lfvector(id->X); del_lfvector(id->Xnew); del_lfvector(id->V); del_lfvector(id->Vnew); del_lfvector(id->F); del_lfvector(id->B); del_lfvector(id->dV); del_lfvector(id->z); MEM_freeN(id); } /* ==== Transformation from/to root reference frames ==== */ BLI_INLINE void world_to_root_v3(Implicit_Data *data, int index, float r[3], const float v[3]) { copy_v3_v3(r, v); mul_transposed_m3_v3(data->tfm[index].m, r); } BLI_INLINE void root_to_world_v3(Implicit_Data *data, int index, float r[3], const float v[3]) { mul_v3_m3v3(r, data->tfm[index].m, v); } BLI_INLINE void world_to_root_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3]) { float trot[3][3]; copy_m3_m3(trot, data->tfm[index].m); transpose_m3(trot); mul_m3_m3m3(r, trot, m); } BLI_INLINE void root_to_world_m3(Implicit_Data *data, int index, float r[3][3], 
float m[3][3]) {
  mul_m3_m3m3(r, data->tfm[index].m, m);
}

/* ================================ */

/* Project each constrained vertex's entry of V into its allowed subspace:
 * V[S[i].r] = S[i].m * V[S[i].r]. A zero block in S fully pins that vertex. */
DO_INLINE void filter(lfVector *V, fmatrix3x3 *S)
{
  unsigned int i = 0;

  for (i = 0; i < S[0].vcount; i++) {
    mul_m3_v3(S[i].m, V[S[i].r]);
  }
}

/* this version of the CG algorithm does not work very well with partial constraints
 * (where S has non-zero elements). */
# if 0
static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S)
{
  // Solves for unknown X in equation AX=B
  unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100;
  float conjgrad_epsilon = 0.0001f /* , conjgrad_lasterror=0 */ /* UNUSED */;
  lfVector *q, *d, *tmp, *r;
  float s, starget, a, s_prev;
  unsigned int numverts = lA[0].vcount;
  q = create_lfvector(numverts);
  d = create_lfvector(numverts);
  tmp = create_lfvector(numverts);
  r = create_lfvector(numverts);

  // zero_lfvector(ldV, CLOTHPARTICLES);
  filter(ldV, S);

  add_lfvector_lfvector(ldV, ldV, z, numverts);

  // r = B - Mul(tmp, A, X);  // just use B if X known to be zero
  cp_lfvector(r, lB, numverts);
  mul_bfmatrix_lfvector(tmp, lA, ldV);
  sub_lfvector_lfvector(r, r, tmp, numverts);

  filter(r, S);

  cp_lfvector(d, r, numverts);

  s = dot_lfvector(r, r, numverts);
  starget = s * sqrtf(conjgrad_epsilon);

  while (s > starget && conjgrad_loopcount < conjgrad_looplimit) {
    // Mul(q, A, d);  // q = A*d;
    mul_bfmatrix_lfvector(q, lA, d);

    filter(q, S);

    a = s / dot_lfvector(d, q, numverts);

    // X = X + d*a;
    add_lfvector_lfvectorS(ldV, ldV, d, a, numverts);

    // r = r - q*a;
    sub_lfvector_lfvectorS(r, r, q, a, numverts);

    s_prev = s;
    s = dot_lfvector(r, r, numverts);

    // d = r+d*(s/s_prev);
    add_lfvector_lfvectorS(d, r, d, (s / s_prev), numverts);

    filter(d, S);

    conjgrad_loopcount++;
  }
  /* conjgrad_lasterror = s; */ /* UNUSED */

  del_lfvector(q);
  del_lfvector(d);
  del_lfvector(tmp);
  del_lfvector(r);
  // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount);

  return conjgrad_loopcount < conjgrad_looplimit;  // true means we reached desired accuracy in
// given time - ie stable
}
# endif

/* Conjugate-gradient solver with constraint filtering (modified PCG after
 * Baraff & Witkin, "Large Steps in Cloth Simulation"): solves A * dV = B
 * for dV, with S projecting out constrained directions and z supplying the
 * target velocity in those directions. Fills `result` with status,
 * iteration count and residual error. Returns non-zero on convergence. */
static int cg_filtered(lfVector *ldV,
                       fmatrix3x3 *lA,
                       lfVector *lB,
                       lfVector *z,
                       fmatrix3x3 *S,
                       ImplicitSolverResult *result)
{
  // Solves for unknown X in equation AX=B
  unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100;
  float conjgrad_epsilon = 0.01f;

  unsigned int numverts = lA[0].vcount;
  lfVector *fB = create_lfvector(numverts);
  lfVector *AdV = create_lfvector(numverts);
  lfVector *r = create_lfvector(numverts);
  lfVector *c = create_lfvector(numverts);
  lfVector *q = create_lfvector(numverts);
  lfVector *s = create_lfvector(numverts);
  float bnorm2, delta_new, delta_old, delta_target, alpha;

  /* Start from the constrained target velocity. */
  cp_lfvector(ldV, z, numverts);

  /* d0 = filter(B)^T * P * filter(B) */
  cp_lfvector(fB, lB, numverts);
  filter(fB, S);
  bnorm2 = dot_lfvector(fB, fB, numverts);
  delta_target = conjgrad_epsilon * conjgrad_epsilon * bnorm2;

  /* r = filter(B - A * dV) */
  mul_bfmatrix_lfvector(AdV, lA, ldV);
  sub_lfvector_lfvector(r, lB, AdV, numverts);
  filter(r, S);

  /* c = filter(P^-1 * r) */
  cp_lfvector(c, r, numverts);
  filter(c, S);

  /* delta = r^T * c */
  delta_new = dot_lfvector(r, c, numverts);

# ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT
  printf("==== A ====\n");
  print_bfmatrix(lA);
  printf("==== z ====\n");
  print_lvector(z, numverts);
  printf("==== B ====\n");
  print_lvector(lB, numverts);
  printf("==== S ====\n");
  print_bfmatrix(S);
# endif

  /* Iterate until the filtered residual is small relative to |B| or the
   * iteration limit is hit. */
  while (delta_new > delta_target && conjgrad_loopcount < conjgrad_looplimit) {
    mul_bfmatrix_lfvector(q, lA, c);
    filter(q, S);

    alpha = delta_new / dot_lfvector(c, q, numverts);

    add_lfvector_lfvectorS(ldV, ldV, c, alpha, numverts);

    add_lfvector_lfvectorS(r, r, q, -alpha, numverts);

    /* s = P^-1 * r */
    cp_lfvector(s, r, numverts);

    delta_old = delta_new;
    delta_new = dot_lfvector(r, s, numverts);

    add_lfvector_lfvectorS(c, s, c, delta_new / delta_old, numverts);
    filter(c, S);

    conjgrad_loopcount++;
  }

# ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT
  printf("==== dV ====\n");
  print_lvector(ldV, numverts);
  printf("========\n");
# endif

  del_lfvector(fB);
  del_lfvector(AdV);
  del_lfvector(r);
  del_lfvector(c);
  del_lfvector(q);
  del_lfvector(s);

  // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount);

  result->status = conjgrad_loopcount < conjgrad_looplimit ? BPH_SOLVER_SUCCESS :
                                                             BPH_SOLVER_NO_CONVERGENCE;
  result->iterations = conjgrad_loopcount;
  result->error = bnorm2 > 0.0f ? sqrtf(delta_new / bnorm2) : 0.0f;

  // true means we reached desired accuracy in given time - ie stable
  return conjgrad_loopcount < conjgrad_looplimit;
}

# if 0
/* block diagonalizer */
DO_INLINE void BuildPPinv(fmatrix3x3 *lA, fmatrix3x3 *P, fmatrix3x3 *Pinv)
{
  unsigned int i = 0;

  // Take only the diagonal blocks of A
  // #pragma omp parallel for private(i) if (lA[0].vcount > CLOTH_OPENMP_LIMIT)
  for (i = 0; i < lA[0].vcount; i++) {
    // block diagonalizer
    cp_fmatrix(P[i].m, lA[i].m);
    inverse_fmatrix(Pinv[i].m, P[i].m);
  }
}

# if 0
// version 1.3
static int cg_filtered_pre(lfVector *dv,
                           fmatrix3x3 *lA,
                           lfVector *lB,
                           lfVector *z,
                           fmatrix3x3 *S,
                           fmatrix3x3 *P,
                           fmatrix3x3 *Pinv)
{
  unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100;
  float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0;
  float conjgrad_epsilon = 0.0001;  // 0.2 is dt for steps=5
  lfVector *r = create_lfvector(numverts);
  lfVector *p = create_lfvector(numverts);
  lfVector *s = create_lfvector(numverts);
  lfVector *h = create_lfvector(numverts);

  BuildPPinv(lA, P, Pinv);

  filter(dv, S);
  add_lfvector_lfvector(dv, dv, z, numverts);

  mul_bfmatrix_lfvector(r, lA, dv);
  sub_lfvector_lfvector(r, lB, r, numverts);
  filter(r, S);

  mul_prevfmatrix_lfvector(p, Pinv, r);
  filter(p, S);

  deltaNew = dot_lfvector(r, p, numverts);

  delta0 = deltaNew * sqrt(conjgrad_epsilon);

# ifdef DEBUG_TIME
  double start = PIL_check_seconds_timer();
# endif

  while ((deltaNew > delta0) && (iterations < conjgrad_looplimit)) {
    iterations++;

    mul_bfmatrix_lfvector(s, lA, p);
    filter(s, S);

    alpha = deltaNew / dot_lfvector(p, s, numverts);

    add_lfvector_lfvectorS(dv, dv, p, alpha, numverts);
add_lfvector_lfvectorS(r, r, s, -alpha, numverts); mul_prevfmatrix_lfvector(h, Pinv, r); filter(h, S); deltaOld = deltaNew; deltaNew = dot_lfvector(r, h, numverts); add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts); filter(p, S); } # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered_pre time: %f\n", (float)(end - start)); # endif del_lfvector(h); del_lfvector(s); del_lfvector(p); del_lfvector(r); printf("iterations: %d\n", iterations); return iterations < conjgrad_looplimit; } # endif // version 1.4 static int cg_filtered_pre(lfVector *dv, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, fmatrix3x3 *P, fmatrix3x3 *Pinv, fmatrix3x3 *bigI) { unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100; float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0, tol = 0; lfVector *r = create_lfvector(numverts); lfVector *p = create_lfvector(numverts); lfVector *s = create_lfvector(numverts); lfVector *h = create_lfvector(numverts); lfVector *bhat = create_lfvector(numverts); lfVector *btemp = create_lfvector(numverts); BuildPPinv(lA, P, Pinv); initdiag_bfmatrix(bigI, I); sub_bfmatrix_Smatrix(bigI, bigI, S); // x = Sx_0+(I-S)z filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); // b_hat = S(b-A(I-S)z) mul_bfmatrix_lfvector(r, lA, z); mul_bfmatrix_lfvector(bhat, bigI, r); sub_lfvector_lfvector(bhat, lB, bhat, numverts); // r = S(b-Ax) mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); // p = SP^-1r mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); // delta0 = bhat^TP^-1bhat mul_prevfmatrix_lfvector(btemp, Pinv, bhat); delta0 = dot_lfvector(bhat, btemp, numverts); // deltaNew = r^TP deltaNew = dot_lfvector(r, p, numverts); # if 0 filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); deltaNew = dot_lfvector(r, p, 
numverts); delta0 = deltaNew * sqrt(conjgrad_epsilon); # endif # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif tol = (0.01 * 0.2); while ((deltaNew > delta0 * tol * tol) && (iterations < conjgrad_looplimit)) { iterations++; mul_bfmatrix_lfvector(s, lA, p); filter(s, S); alpha = deltaNew / dot_lfvector(p, s, numverts); add_lfvector_lfvectorS(dv, dv, p, alpha, numverts); add_lfvector_lfvectorS(r, r, s, -alpha, numverts); mul_prevfmatrix_lfvector(h, Pinv, r); filter(h, S); deltaOld = deltaNew; deltaNew = dot_lfvector(r, h, numverts); add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts); filter(p, S); } # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered_pre time: %f\n", (float)(end - start)); # endif del_lfvector(btemp); del_lfvector(bhat); del_lfvector(h); del_lfvector(s); del_lfvector(p); del_lfvector(r); // printf("iterations: %d\n", iterations); return iterations < conjgrad_looplimit; } # endif bool BPH_mass_spring_solve_velocities(Implicit_Data *data, float dt, ImplicitSolverResult *result) { unsigned int numverts = data->dFdV[0].vcount; lfVector *dFdXmV = create_lfvector(numverts); zero_lfvector(data->dV, numverts); cp_bfmatrix(data->A, data->M); subadd_bfmatrixS_bfmatrixS(data->A, data->dFdV, dt, data->dFdX, (dt * dt)); mul_bfmatrix_lfvector(dFdXmV, data->dFdX, data->V); add_lfvectorS_lfvectorS(data->B, data->F, dt, dFdXmV, (dt * dt), numverts); # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif /* Conjugate gradient algorithm to solve Ax=b. 
*/
  cg_filtered(data->dV, data->A, data->B, data->z, data->S, result);
  // cg_filtered_pre(id->dV, id->A, id->B, id->z, id->S, id->P, id->Pinv, id->bigI);
# ifdef DEBUG_TIME
  double end = PIL_check_seconds_timer();
  printf("cg_filtered calc time: %f\n", (float)(end - start));
# endif

  // advance velocities
  add_lfvector_lfvector(data->Vnew, data->V, data->dV, numverts);

  del_lfvector(dFdXmV);

  return result->status == BPH_SOLVER_SUCCESS;
}

/* Integrate positions with the solved velocities: Xnew = X + Vnew * dt. */
bool BPH_mass_spring_solve_positions(Implicit_Data *data, float dt)
{
  int numverts = data->M[0].vcount;

  // advance positions
  add_lfvector_lfvectorS(data->Xnew, data->X, data->Vnew, dt, numverts);

  return true;
}

/* Commit the solved step: copy Xnew/Vnew into the current state X/V. */
void BPH_mass_spring_apply_result(Implicit_Data *data)
{
  int numverts = data->M[0].vcount;
  cp_lfvector(data->X, data->Xnew, numverts);
  cp_lfvector(data->V, data->Vnew, numverts);
}

/* Set the (isotropic) mass block of a vertex: M[index] = mass * I. */
void BPH_mass_spring_set_vertex_mass(Implicit_Data *data, int index, float mass)
{
  unit_m3(data->M[index].m);
  mul_m3_fl(data->M[index].m, mass);
}

/* Store the vertex root-frame rotation; identity when root frames are disabled. */
void BPH_mass_spring_set_rest_transform(Implicit_Data *data, int index, float tfm[3][3])
{
# ifdef CLOTH_ROOT_FRAME
  copy_m3_m3(data->tfm[index].m, tfm);
# else
  unit_m3(data->tfm[index].m);
  (void)tfm;
# endif
}

/* Setters take world-space input and store root-frame values. */
void BPH_mass_spring_set_motion_state(Implicit_Data *data,
                                      int index,
                                      const float x[3],
                                      const float v[3])
{
  world_to_root_v3(data, index, data->X[index], x);
  world_to_root_v3(data, index, data->V[index], v);
}

void BPH_mass_spring_set_position(Implicit_Data *data, int index, const float x[3])
{
  world_to_root_v3(data, index, data->X[index], x);
}

void BPH_mass_spring_set_velocity(Implicit_Data *data, int index, const float v[3])
{
  world_to_root_v3(data, index, data->V[index], v);
}

/* Read back position/velocity in world space; either output may be NULL. */
void BPH_mass_spring_get_motion_state(struct Implicit_Data *data,
                                      int index,
                                      float x[3],
                                      float v[3])
{
  if (x) {
    root_to_world_v3(data, index, x, data->X[index]);
  }
  if (v) {
    root_to_world_v3(data, index, v, data->V[index]);
  }
}

void BPH_mass_spring_get_position(struct Implicit_Data *data, int index, float x[3])
{
  root_to_world_v3(data, index, x, data->X[index]);
}

void BPH_mass_spring_get_new_position(struct Implicit_Data *data, int index, float x[3])
{
  root_to_world_v3(data, index, x, data->Xnew[index]);
}

void BPH_mass_spring_set_new_position(struct Implicit_Data *data, int index, const float x[3])
{
  world_to_root_v3(data, index, data->Xnew[index], x);
}

void BPH_mass_spring_get_new_velocity(struct Implicit_Data *data, int index, float v[3])
{
  root_to_world_v3(data, index, v, data->Vnew[index]);
}

void BPH_mass_spring_set_new_velocity(struct Implicit_Data *data, int index, const float v[3])
{
  world_to_root_v3(data, index, data->Vnew[index], v);
}

/* -------------------------------- */

/* Reserve the next off-diagonal (spring) block for the vertex pair (v1, v2)
 * in every spring-carrying matrix, and return its array index. */
static int BPH_mass_spring_add_block(Implicit_Data *data, int v1, int v2)
{
  int s = data->M[0].vcount + data->num_blocks; /* index from array start */
  BLI_assert(s < data->M[0].vcount + data->M[0].scount);
  ++data->num_blocks;

  /* tfm and S don't have spring entries (diagonal blocks only) */
  init_fmatrix(data->bigI + s, v1, v2);
  init_fmatrix(data->M + s, v1, v2);
  init_fmatrix(data->dFdX + s, v1, v2);
  init_fmatrix(data->dFdV + s, v1, v2);
  init_fmatrix(data->A + s, v1, v2);
  init_fmatrix(data->P + s, v1, v2);
  init_fmatrix(data->Pinv + s, v1, v2);

  return s;
}

/* Reset all constraints: S becomes identity (no filtering), z becomes zero. */
void BPH_mass_spring_clear_constraints(Implicit_Data *data)
{
  int i, numverts = data->S[0].vcount;
  for (i = 0; i < numverts; ++i) {
    unit_m3(data->S[i].m);
    zero_v3(data->z[i]);
  }
}

/* Fully pin a vertex (zero degrees of freedom), with target velocity dV. */
void BPH_mass_spring_add_constraint_ndof0(Implicit_Data *data, int index, const float dV[3])
{
  zero_m3(data->S[index].m);

  world_to_root_v3(data, index, data->z[index], dV);
}

/* Constrain a vertex to one degree of freedom by removing the two
 * constraint axes c1 and c2 from its filter matrix. */
void BPH_mass_spring_add_constraint_ndof1(
    Implicit_Data *data, int index, const float c1[3], const float c2[3], const float dV[3])
{
  float m[3][3], p[3], q[3], u[3], cmat[3][3];

  world_to_root_v3(data, index, p, c1);
  mul_fvectorT_fvector(cmat, p, p);
  sub_m3_m3m3(m, I, cmat);

  world_to_root_v3(data, index, q, c2);
  mul_fvectorT_fvector(cmat, q, q);
  sub_m3_m3m3(m, m, cmat);

  /* XXX not sure but
multiplication should work here */
  copy_m3_m3(data->S[index].m, m);
  // mul_m3_m3m3(data->S[index].m, data->S[index].m, m);

  world_to_root_v3(data, index, u, dV);
  add_v3_v3(data->z[index], u);
}

/* Constrain a vertex to two degrees of freedom by removing the single
 * constraint axis c1 from its filter matrix. */
void BPH_mass_spring_add_constraint_ndof2(Implicit_Data *data,
                                          int index,
                                          const float c1[3],
                                          const float dV[3])
{
  float m[3][3], p[3], u[3], cmat[3][3];

  world_to_root_v3(data, index, p, c1);
  mul_fvectorT_fvector(cmat, p, p);
  sub_m3_m3m3(m, I, cmat);

  copy_m3_m3(data->S[index].m, m);
  // mul_m3_m3m3(data->S[index].m, data->S[index].m, m);

  world_to_root_v3(data, index, u, dV);
  add_v3_v3(data->z[index], u);
}

/* Zero the force vector and both force jacobians, and reset the
 * off-diagonal block counter for the coming force accumulation pass. */
void BPH_mass_spring_clear_forces(Implicit_Data *data)
{
  int numverts = data->M[0].vcount;
  zero_lfvector(data->F, numverts);
  init_bfmatrix(data->dFdX, ZERO);
  init_bfmatrix(data->dFdV, ZERO);

  data->num_blocks = 0;
}

/* Fictitious forces from a non-inertial (rotating/accelerating) root frame:
 * frame acceleration, Euler, Coriolis and centrifugal terms, with their
 * jacobians. No-op unless CLOTH_ROOT_FRAME is enabled. */
void BPH_mass_spring_force_reference_frame(Implicit_Data *data,
                                           int index,
                                           const float acceleration[3],
                                           const float omega[3],
                                           const float domega_dt[3],
                                           float mass)
{
# ifdef CLOTH_ROOT_FRAME
  float acc[3], w[3], dwdt[3];
  float f[3], dfdx[3][3], dfdv[3][3];
  float euler[3], coriolis[3], centrifugal[3], rotvel[3];
  float deuler[3][3], dcoriolis[3][3], dcentrifugal[3][3], drotvel[3][3];

  world_to_root_v3(data, index, acc, acceleration);
  world_to_root_v3(data, index, w, omega);
  world_to_root_v3(data, index, dwdt, domega_dt);

  cross_v3_v3v3(euler, dwdt, data->X[index]);
  cross_v3_v3v3(coriolis, w, data->V[index]);
  mul_v3_fl(coriolis, 2.0f);
  cross_v3_v3v3(rotvel, w, data->X[index]);
  cross_v3_v3v3(centrifugal, w, rotvel);

  sub_v3_v3v3(f, acc, euler);
  sub_v3_v3(f, coriolis);
  sub_v3_v3(f, centrifugal);

  mul_v3_fl(f, mass); /* F = m * a */

  cross_v3_identity(deuler, dwdt);
  cross_v3_identity(dcoriolis, w);
  mul_m3_fl(dcoriolis, 2.0f);
  cross_v3_identity(drotvel, w);
  cross_m3_v3m3(dcentrifugal, w, drotvel);

  add_m3_m3m3(dfdx, deuler, dcentrifugal);
  negate_m3(dfdx);
  mul_m3_fl(dfdx, mass);

  copy_m3_m3(dfdv, dcoriolis);
  negate_m3(dfdv);
  mul_m3_fl(dfdv, mass);

  add_v3_v3(data->F[index], f);
add_m3_m3m3(data->dFdX[index].m, data->dFdX[index].m, dfdx);
  add_m3_m3m3(data->dFdV[index].m, data->dFdV[index].m, dfdv);
# else
  (void)data;
  (void)index;
  (void)acceleration;
  (void)omega;
  (void)domega_dt;
# endif
}

/* Gravity on one vertex. */
void BPH_mass_spring_force_gravity(Implicit_Data *data, int index, float mass, const float g[3])
{
  /* force = mass * acceleration (in this case: gravity) */
  float f[3];
  world_to_root_v3(data, index, f, g);
  mul_v3_fl(f, mass);

  add_v3_v3(data->F[index], f);
}

/* Simple linear drag F = -drag * V on every vertex; the jacobian dF/dV is
 * the constant diagonal -drag * I. */
void BPH_mass_spring_force_drag(Implicit_Data *data, float drag)
{
  int i, numverts = data->M[0].vcount;
  for (i = 0; i < numverts; i++) {
    float tmp[3][3];

    /* NB: uses root space velocity, no need to transform */
    madd_v3_v3fl(data->F[i], data->V[i], -drag);

    copy_m3_m3(tmp, I);
    mul_m3_fl(tmp, -drag);
    add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tmp);
  }
}

/* Add an externally computed (world-space) force and its jacobians. */
void BPH_mass_spring_force_extern(
    struct Implicit_Data *data, int i, const float f[3], float dfdx[3][3], float dfdv[3][3])
{
  float tf[3], tdfdx[3][3], tdfdv[3][3];
  world_to_root_v3(data, i, tf, f);
  world_to_root_m3(data, i, tdfdx, dfdx);
  world_to_root_m3(data, i, tdfdv, dfdv);

  add_v3_v3(data->F[i], tf);
  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, tdfdx);
  add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tdfdv);
}

/* Unit normal of the triangle (v1, v2, v3) into `nor`; returns its length
 * before normalization (NOTE: that is twice the triangle area). */
static float calc_nor_area_tri(float nor[3],
                               const float v1[3],
                               const float v2[3],
                               const float v3[3])
{
  float n1[3], n2[3];
  sub_v3_v3v3(n1, v1, v2);
  sub_v3_v3v3(n2, v2, v3);

  cross_v3_v3v3(nor, n1, n2);
  return normalize_v3(nor);
}

/* XXX does not support force jacobians yet, since the effector system does not provide them either */
void BPH_mass_spring_force_face_wind(
    Implicit_Data *data, int v1, int v2, int v3, const float (*winvec)[3])
{
  const float effector_scale = 0.02f;
  float win[3], nor[3], area;
  float factor;

  /* calculate face normal and area */
  area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]);
  factor = effector_scale * area / 3.0f;

  world_to_root_v3(data, v1, win, winvec[v1]);
  madd_v3_v3fl(data->F[v1], nor, factor *
dot_v3v3(win, nor));

  world_to_root_v3(data, v2, win, winvec[v2]);
  madd_v3_v3fl(data->F[v2], nor, factor * dot_v3v3(win, nor));

  world_to_root_v3(data, v3, win, winvec[v3]);
  madd_v3_v3fl(data->F[v3], nor, factor * dot_v3v3(win, nor));
}

/* Wind force on one endpoint of an edge (hair segment), modeling the
 * segment as a cylinder cap + side cross section against the wind. */
static void edge_wind_vertex(const float dir[3],
                             float length,
                             float radius,
                             const float wind[3],
                             float f[3],
                             float UNUSED(dfdx[3][3]),
                             float UNUSED(dfdv[3][3]))
{
  const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */
  float cos_alpha, sin_alpha, cross_section;
  float windlen = len_v3(wind);

  if (windlen == 0.0f) {
    zero_v3(f);
    return;
  }

  /* angle of wind direction to edge */
  cos_alpha = dot_v3v3(wind, dir) / windlen;
  sin_alpha = sqrtf(1.0f - cos_alpha * cos_alpha);
  cross_section = radius * ((float)M_PI * radius * sin_alpha + length * cos_alpha);

  mul_v3_v3fl(f, wind, density * cross_section);
}

/* Apply wind to both endpoints of an edge, each with its own radius. */
void BPH_mass_spring_force_edge_wind(
    Implicit_Data *data, int v1, int v2, float radius1, float radius2, const float (*winvec)[3])
{
  float win[3], dir[3], length;
  float f[3], dfdx[3][3], dfdv[3][3];

  sub_v3_v3v3(dir, data->X[v1], data->X[v2]);
  length = normalize_v3(dir);

  world_to_root_v3(data, v1, win, winvec[v1]);
  edge_wind_vertex(dir, length, radius1, win, f, dfdx, dfdv);
  add_v3_v3(data->F[v1], f);

  world_to_root_v3(data, v2, win, winvec[v2]);
  edge_wind_vertex(dir, length, radius2, win, f, dfdx, dfdv);
  add_v3_v3(data->F[v2], f);
}

/* Wind on a lone vertex, proportional to wind velocity. */
void BPH_mass_spring_force_vertex_wind(Implicit_Data *data,
                                       int v,
                                       float UNUSED(radius),
                                       const float (*winvec)[3])
{
  const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */

  float wind[3];
  float f[3];

  world_to_root_v3(data, v, wind, winvec[v]);
  mul_v3_v3fl(f, wind, density);
  add_v3_v3(data->F[v], f);
}

/* Jacobian dF/dx of a linear spring force. */
BLI_INLINE void dfdx_spring(float to[3][3], const float dir[3], float length, float L, float k)
{
  // dir is unit length direction, rest is spring's restlength, k is spring constant.
// return ( (I-outerprod(dir, dir))*Min(1.0f, rest/length) - I) * -k; outerproduct(to, dir, dir); sub_m3_m3m3(to, I, to); mul_m3_fl(to, (L / length)); sub_m3_m3m3(to, to, I); mul_m3_fl(to, k); } /* unused */ # if 0 BLI_INLINE void dfdx_damp(float to[3][3], const float dir[3], float length, const float vel[3], float rest, float damping) { // inner spring damping vel is the relative velocity of the endpoints. // return (I-outerprod(dir, dir)) * (-damping * -(dot(dir, vel)/Max(length, rest))); mul_fvectorT_fvector(to, dir, dir); sub_fmatrix_fmatrix(to, I, to); mul_fmatrix_S(to, (-damping * -(dot_v3v3(dir, vel) / MAX2(length, rest)))); } # endif BLI_INLINE void dfdv_damp(float to[3][3], const float dir[3], float damping) { // derivative of force wrt velocity outerproduct(to, dir, dir); mul_m3_fl(to, -damping); } BLI_INLINE float fb(float length, float L) { float x = length / L; float xx = x * x; float xxx = xx * x; float xxxx = xxx * x; return (-11.541f * xxxx + 34.193f * xxx - 39.083f * xx + 23.116f * x - 9.713f); } BLI_INLINE float fbderiv(float length, float L) { float x = length / L; float xx = x * x; float xxx = xx * x; return (-46.164f * xxx + 102.579f * xx - 78.166f * x + 23.116f); } BLI_INLINE float fbstar(float length, float L, float kb, float cb) { float tempfb_fl = kb * fb(length, L); float fbstar_fl = cb * (length - L); if (tempfb_fl < fbstar_fl) { return fbstar_fl; } else { return tempfb_fl; } } // function to calculae bending spring force (taken from Choi & Co) BLI_INLINE float fbstar_jacobi(float length, float L, float kb, float cb) { float tempfb_fl = kb * fb(length, L); float fbstar_fl = cb * (length - L); if (tempfb_fl < fbstar_fl) { return -cb; } else { return -kb * fbderiv(length, L); } } /* calculate elonglation */ BLI_INLINE bool spring_length(Implicit_Data *data, int i, int j, float r_extent[3], float r_dir[3], float *r_length, float r_vel[3]) { sub_v3_v3v3(r_extent, data->X[j], data->X[i]); sub_v3_v3v3(r_vel, data->V[j], data->V[i]); *r_length = 
len_v3(r_extent);

  if (*r_length > ALMOST_ZERO) {
# if 0
    if (length > L) {
      if ((clmd->sim_parms->flags & CSIMSETT_FLAG_TEARING_ENABLED) &&
          (((length - L) * 100.0f / L) > clmd->sim_parms->maxspringlen)) {
        // cut spring!
        s->flags |= CSPRING_FLAG_DEACTIVATE;
        return false;
      }
    }
# endif
    mul_v3_v3fl(r_dir, r_extent, 1.0f / (*r_length));
  }
  else {
    zero_v3(r_dir);
  }

  return true;
}

/* Accumulate a pairwise spring force f and its jacobians symmetrically:
 * +f on i, -f on j; jacobians added on both diagonal blocks and subtracted
 * on the newly reserved off-diagonal block (i, j). */
BLI_INLINE void apply_spring(
    Implicit_Data *data, int i, int j, const float f[3], float dfdx[3][3], float dfdv[3][3])
{
  int block_ij = BPH_mass_spring_add_block(data, i, j);

  add_v3_v3(data->F[i], f);
  sub_v3_v3(data->F[j], f);

  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx);
  add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfdx);
  sub_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfdx);

  add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv);
  add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfdv);
  sub_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfdv);
}

/* Linear structural spring: tension when stretched (optionally clamped),
 * and either linear or Choi-Ko style compression resistance. Returns false
 * when no force was applied. */
bool BPH_mass_spring_force_spring_linear(Implicit_Data *data,
                                         int i,
                                         int j,
                                         float restlen,
                                         float stiffness_tension,
                                         float damping_tension,
                                         float stiffness_compression,
                                         float damping_compression,
                                         bool resist_compress,
                                         bool new_compress,
                                         float clamp_force)
{
  float extent[3], length, dir[3], vel[3];
  float f[3], dfdx[3][3], dfdv[3][3];
  float damping = 0;

  // calculate elonglation
  spring_length(data, i, j, extent, dir, &length, vel);

  /* This code computes not only the force, but also its derivative.
     Zero derivative effectively disables the spring for the implicit solver.
     Thus length > restlen makes cloth unconstrained at the start of simulation. */
  if ((length >= restlen && length > 0) || resist_compress) {
    float stretch_force;

    damping = damping_tension;

    stretch_force = stiffness_tension * (length - restlen);
    if (clamp_force > 0.0f && stretch_force > clamp_force) {
      stretch_force = clamp_force;
    }
    mul_v3_v3fl(f, dir, stretch_force);

    dfdx_spring(dfdx, dir, length, restlen, stiffness_tension);
  }
  else if (new_compress) {
    /* This is based on the Choi and Ko bending model,
     * which works surprisingly well for compression. */
    float kb = stiffness_compression;
    float cb = kb; /* cb equal to kb seems to work, but a factor can be added if necessary */

    damping = damping_compression;

    mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb));

    outerproduct(dfdx, dir, dir);
    mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb));
  }
  else {
    return false;
  }

  madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir));
  dfdv_damp(dfdv, dir, damping);

  apply_spring(data, i, j, f, dfdx, dfdv);

  return true;
}

/* See "Stable but Responsive Cloth" (Choi, Ko 2005) */
bool BPH_mass_spring_force_spring_bending(
    Implicit_Data *data, int i, int j, float restlen, float kb, float cb)
{
  float extent[3], length, dir[3], vel[3];

  // calculate elonglation
  spring_length(data, i, j, extent, dir, &length, vel);

  if (length < restlen) {
    float f[3], dfdx[3][3], dfdv[3][3];

    mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb));

    outerproduct(dfdx, dir, dir);
    mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb));

    /* XXX damping not supported */
    zero_m3(dfdv);

    apply_spring(data, i, j, f, dfdx, dfdv);

    return true;
  }
  else {
    return false;
  }
}

/* Average of `len` vectors selected by `inds` from `data`. */
BLI_INLINE void poly_avg(lfVector *data, int *inds, int len, float r_avg[3])
{
  float fact = 1.0f / (float)len;

  zero_v3(r_avg);

  for (int i = 0; i < len; i++) {
    madd_v3_v3fl(r_avg, data[inds[i]], fact);
  }
}

/* Normal of the triangle formed by i, j and the centroid of the polygon. */
BLI_INLINE void poly_norm(lfVector *data, int i, int j, int *inds, int len, float r_dir[3])
{
  float mid[3];

  poly_avg(data, inds, len, mid);

  normal_tri_v3(r_dir, data[i], data[j], mid);
}

/* Midpoint of the two vectors at indices i and j. */
BLI_INLINE void edge_avg(lfVector *data, int
i, int j, float r_avg[3]) {
  r_avg[0] = (data[i][0] + data[j][0]) * 0.5f;
  r_avg[1] = (data[i][1] + data[j][1]) * 0.5f;
  r_avg[2] = (data[i][2] + data[j][2]) * 0.5f;
}

/* Unit direction of the edge (i, j). */
BLI_INLINE void edge_norm(lfVector *data, int i, int j, float r_dir[3])
{
  sub_v3_v3v3(r_dir, data[i], data[j]);
  normalize_v3(r_dir);
}

/* Signed angle between the two face directions around the hinge axis dir_e. */
BLI_INLINE float bend_angle(float dir_a[3], float dir_b[3], float dir_e[3])
{
  float cos, sin;
  float tmp[3];

  cos = dot_v3v3(dir_a, dir_b);

  cross_v3_v3v3(tmp, dir_a, dir_b);
  sin = dot_v3v3(tmp, dir_e);

  return atan2f(sin, cos);
}

/* Geometry of an angular (bending) spring over the edge (i, j): face normals
 * of the two adjacent polygons (i_a/i_b vertex lists), the bend angle, and
 * the polygon velocities relative to the edge midpoint velocity. */
BLI_INLINE void spring_angle(Implicit_Data *data,
                             int i,
                             int j,
                             int *i_a,
                             int *i_b,
                             int len_a,
                             int len_b,
                             float r_dir_a[3],
                             float r_dir_b[3],
                             float *r_angle,
                             float r_vel_a[3],
                             float r_vel_b[3])
{
  float dir_e[3], vel_e[3];

  poly_norm(data->X, j, i, i_a, len_a, r_dir_a);
  poly_norm(data->X, i, j, i_b, len_b, r_dir_b);

  edge_norm(data->X, i, j, dir_e);

  *r_angle = bend_angle(r_dir_a, r_dir_b, dir_e);

  poly_avg(data->V, i_a, len_a, r_vel_a);
  poly_avg(data->V, i_b, len_b, r_vel_b);

  edge_avg(data->V, i, j, vel_e);

  sub_v3_v3(r_vel_a, vel_e);
  sub_v3_v3(r_vel_b, vel_e);
}

/* Angular springs roughly based on the bending model proposed by Baraff and Witkin in "Large Steps
 * in Cloth Simulation". */
bool BPH_mass_spring_force_spring_angular(Implicit_Data *data,
                                          int i,
                                          int j,
                                          int *i_a,
                                          int *i_b,
                                          int len_a,
                                          int len_b,
                                          float restang,
                                          float stiffness,
                                          float damping)
{
  float angle, dir_a[3], dir_b[3], vel_a[3], vel_b[3];
  float f_a[3], f_b[3], f_e[3];
  float force;
  int x;

  spring_angle(data, i, j, i_a, i_b, len_a, len_b, dir_a, dir_b, &angle, vel_a, vel_b);

  /* spring force */
  force = stiffness * (angle - restang);

  /* damping force */
  force += -damping * (dot_v3v3(vel_a, dir_a) + dot_v3v3(vel_b, dir_b));

  /* Distribute the force along the normals over the two polygons ... */
  mul_v3_v3fl(f_a, dir_a, force / len_a);
  mul_v3_v3fl(f_b, dir_b, force / len_b);

  for (x = 0; x < len_a; x++) {
    add_v3_v3(data->F[i_a[x]], f_a);
  }
  for (x = 0; x < len_b; x++) {
    add_v3_v3(data->F[i_b[x]], f_b);
  }

  /* ... and the counterforce over the two edge vertices. */
  mul_v3_v3fl(f_a, dir_a, force * 0.5f);
  mul_v3_v3fl(f_b, dir_b, force * 0.5f);

  add_v3_v3v3(f_e, f_a, f_b);

  sub_v3_v3(data->F[i], f_e);
  sub_v3_v3(data->F[j], f_e);

  return true;
}

/* Jacobian of a direction vector.
 * Basically the part of the differential orthogonal to the direction,
 * inversely proportional to the length of the edge.
* * dD_ij/dx_i = -dD_ij/dx_j = (D_ij * D_ij^T - I) / len_ij */ BLI_INLINE void spring_grad_dir( Implicit_Data *data, int i, int j, float edge[3], float dir[3], float grad_dir[3][3]) { float length; sub_v3_v3v3(edge, data->X[j], data->X[i]); length = normalize_v3_v3(dir, edge); if (length > ALMOST_ZERO) { outerproduct(grad_dir, dir, dir); sub_m3_m3m3(grad_dir, I, grad_dir); mul_m3_fl(grad_dir, 1.0f / length); } else { zero_m3(grad_dir); } } BLI_INLINE void spring_hairbend_forces(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, const float dx[3], const float dv[3], float r_f[3]) { float edge_ij[3], dir_ij[3]; float edge_jk[3], dir_jk[3]; float vel_ij[3], vel_jk[3], vel_ortho[3]; float f_bend[3], f_damp[3]; float fk[3]; float dist[3]; zero_v3(fk); sub_v3_v3v3(edge_ij, data->X[j], data->X[i]); if (q == i) { sub_v3_v3(edge_ij, dx); } if (q == j) { add_v3_v3(edge_ij, dx); } normalize_v3_v3(dir_ij, edge_ij); sub_v3_v3v3(edge_jk, data->X[k], data->X[j]); if (q == j) { sub_v3_v3(edge_jk, dx); } if (q == k) { add_v3_v3(edge_jk, dx); } normalize_v3_v3(dir_jk, edge_jk); sub_v3_v3v3(vel_ij, data->V[j], data->V[i]); if (q == i) { sub_v3_v3(vel_ij, dv); } if (q == j) { add_v3_v3(vel_ij, dv); } sub_v3_v3v3(vel_jk, data->V[k], data->V[j]); if (q == j) { sub_v3_v3(vel_jk, dv); } if (q == k) { add_v3_v3(vel_jk, dv); } /* bending force */ sub_v3_v3v3(dist, goal, edge_jk); mul_v3_v3fl(f_bend, dist, stiffness); add_v3_v3(fk, f_bend); /* damping force */ madd_v3_v3v3fl(vel_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk)); mul_v3_v3fl(f_damp, vel_ortho, damping); sub_v3_v3(fk, f_damp); copy_v3_v3(r_f, fk); } /* Finite Differences method for estimating the jacobian of the force */ BLI_INLINE void spring_hairbend_estimate_dfdx(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, float dfdx[3][3]) { const float delta = 0.00001f; // TODO find a good heuristic for this float dvec_null[3][3], 
dvec_pos[3][3], dvec_neg[3][3]; float f[3]; int a, b; zero_m3(dvec_null); unit_m3(dvec_pos); mul_m3_fl(dvec_pos, delta * 0.5f); copy_m3_m3(dvec_neg, dvec_pos); negate_m3(dvec_neg); /* XXX TODO offset targets to account for position dependency */ for (a = 0; a < 3; ++a) { spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_pos[a], dvec_null[a], f); copy_v3_v3(dfdx[a], f); spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_neg[a], dvec_null[a], f); sub_v3_v3(dfdx[a], f); for (b = 0; b < 3; ++b) { dfdx[a][b] /= delta; } } } /* Finite Differences method for estimating the jacobian of the force */ BLI_INLINE void spring_hairbend_estimate_dfdv(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, float dfdv[3][3]) { const float delta = 0.00001f; // TODO find a good heuristic for this float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3]; float f[3]; int a, b; zero_m3(dvec_null); unit_m3(dvec_pos); mul_m3_fl(dvec_pos, delta * 0.5f); copy_m3_m3(dvec_neg, dvec_pos); negate_m3(dvec_neg); /* XXX TODO offset targets to account for position dependency */ for (a = 0; a < 3; ++a) { spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_pos[a], f); copy_v3_v3(dfdv[a], f); spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_neg[a], f); sub_v3_v3(dfdv[a], f); for (b = 0; b < 3; ++b) { dfdv[a][b] /= delta; } } } /* Angular spring that pulls the vertex toward the local target * See "Artistic Simulation of Curly Hair" (Pixar technical memo #12-03a) */ bool BPH_mass_spring_force_spring_bending_hair(Implicit_Data *data, int i, int j, int k, const float target[3], float stiffness, float damping) { float goal[3]; float fj[3], fk[3]; float dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3]; float dfj_dvi[3][3], dfj_dvj[3][3], dfk_dvi[3][3], dfk_dvj[3][3], dfk_dvk[3][3]; const float vecnull[3] = {0.0f, 0.0f, 
0.0f}; int block_ij = BPH_mass_spring_add_block(data, i, j); int block_jk = BPH_mass_spring_add_block(data, j, k); int block_ik = BPH_mass_spring_add_block(data, i, k); world_to_root_v3(data, j, goal, target); spring_hairbend_forces(data, i, j, k, goal, stiffness, damping, k, vecnull, vecnull, fk); negate_v3_v3(fj, fk); /* counterforce */ spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, i, dfk_dxi); spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, j, dfk_dxj); spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, k, dfk_dxk); copy_m3_m3(dfj_dxi, dfk_dxi); negate_m3(dfj_dxi); copy_m3_m3(dfj_dxj, dfk_dxj); negate_m3(dfj_dxj); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, i, dfk_dvi); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, j, dfk_dvj); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, k, dfk_dvk); copy_m3_m3(dfj_dvi, dfk_dvi); negate_m3(dfj_dvi); copy_m3_m3(dfj_dvj, dfk_dvj); negate_m3(dfj_dvj); /* add forces and jacobians to the solver data */ add_v3_v3(data->F[j], fj); add_v3_v3(data->F[k], fk); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj); add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk); add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi); add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj); add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi); add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfj_dvj); add_m3_m3m3(data->dFdV[k].m, data->dFdV[k].m, dfk_dvk); add_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfj_dvi); add_m3_m3m3(data->dFdV[block_jk].m, data->dFdV[block_jk].m, dfk_dvj); add_m3_m3m3(data->dFdV[block_ik].m, data->dFdV[block_ik].m, dfk_dvi); /* XXX analytical calculation of derivatives below is incorrect. * This proved to be difficult, but for now just using the finite difference method for * estimating the jacobians should be sufficient. 
*/ # if 0 float edge_ij[3], dir_ij[3], grad_dir_ij[3][3]; float edge_jk[3], dir_jk[3], grad_dir_jk[3][3]; float dist[3], vel_jk[3], vel_jk_ortho[3], projvel[3]; float target[3]; float tmp[3][3]; float fi[3], fj[3], fk[3]; float dfi_dxi[3][3], dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3]; float dfdvi[3][3]; // TESTING damping = 0.0f; zero_v3(fi); zero_v3(fj); zero_v3(fk); zero_m3(dfi_dxi); zero_m3(dfj_dxi); zero_m3(dfk_dxi); zero_m3(dfk_dxj); zero_m3(dfk_dxk); /* jacobian of direction vectors */ spring_grad_dir(data, i, j, edge_ij, dir_ij, grad_dir_ij); spring_grad_dir(data, j, k, edge_jk, dir_jk, grad_dir_jk); sub_v3_v3v3(vel_jk, data->V[k], data->V[j]); /* bending force */ mul_v3_v3fl(target, dir_ij, restlen); sub_v3_v3v3(dist, target, edge_jk); mul_v3_v3fl(fk, dist, stiffness); /* damping force */ madd_v3_v3v3fl(vel_jk_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk)); madd_v3_v3fl(fk, vel_jk_ortho, damping); /* XXX this only holds true as long as we assume straight rest shape! * eventually will become a bit more involved since the opposite segment * gets its own target, under condition of having equal torque on both sides. 
*/ copy_v3_v3(fi, fk); /* counterforce on the middle point */ sub_v3_v3(fj, fi); sub_v3_v3(fj, fk); /* === derivatives === */ madd_m3_m3fl(dfk_dxi, grad_dir_ij, stiffness * restlen); madd_m3_m3fl(dfk_dxj, grad_dir_ij, -stiffness * restlen); madd_m3_m3fl(dfk_dxj, I, stiffness); madd_m3_m3fl(dfk_dxk, I, -stiffness); copy_m3_m3(dfi_dxi, dfk_dxk); negate_m3(dfi_dxi); /* dfj_dfi == dfi_dfj due to symmetry, * dfi_dfj == dfk_dfj due to fi == fk * XXX see comment above on future bent rest shapes */ copy_m3_m3(dfj_dxi, dfk_dxj); /* dfj_dxj == -(dfi_dxj + dfk_dxj) due to fj == -(fi + fk) */ sub_m3_m3m3(dfj_dxj, dfj_dxj, dfj_dxi); sub_m3_m3m3(dfj_dxj, dfj_dxj, dfk_dxj); /* add forces and jacobians to the solver data */ add_v3_v3(data->F[i], fi); add_v3_v3(data->F[j], fj); add_v3_v3(data->F[k], fk); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfi_dxi); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj); add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk); add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi); add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj); add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi); # endif return true; } bool BPH_mass_spring_force_spring_goal(Implicit_Data *data, int i, const float goal_x[3], const float goal_v[3], float stiffness, float damping) { float root_goal_x[3], root_goal_v[3], extent[3], length, dir[3], vel[3]; float f[3], dfdx[3][3], dfdv[3][3]; /* goal is in world space */ world_to_root_v3(data, i, root_goal_x, goal_x); world_to_root_v3(data, i, root_goal_v, goal_v); sub_v3_v3v3(extent, root_goal_x, data->X[i]); sub_v3_v3v3(vel, root_goal_v, data->V[i]); length = normalize_v3_v3(dir, extent); if (length > ALMOST_ZERO) { mul_v3_v3fl(f, dir, stiffness * length); // Ascher & Boxman, p.21: Damping only during elonglation // something wrong with it... 
madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir)); dfdx_spring(dfdx, dir, length, 0.0f, stiffness); dfdv_damp(dfdv, dir, damping); add_v3_v3(data->F[i], f); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv); return true; } else { return false; } } #endif /* IMPLICIT_SOLVER_BLENDER */
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 4; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; 
t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
HDAA_fmt_plug.c
/* HTTP Digest access authentication patch for john * * Written by Romain Raboin. OMP and intrinsics support by magnum * * This software is Copyright (c) 2008 Romain Raboin - romain.raboin at * gmail.com, and Copyright (c) 2012 magnum and it is hereby released to * the general public under the following terms: Redistribution and * use in source and binary forms, with or without modification, are * permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_HDAA; #elif FMT_REGISTERS_H john_register_one(&fmt_HDAA); #else #include <stdint.h> #include <string.h> #ifdef __MMX__ #include <mmintrin.h> #endif #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "md5.h" #include "simd-intrinsics.h" #define ALGORITHM_NAME "MD5 " MD5_ALGORITHM_NAME #if !FAST_FORMATS_OMP #undef _OPENMP #endif #if defined(_OPENMP) #include <omp.h> #endif #include "memdbg.h" #define FORMAT_LABEL "hdaa" #define FORMAT_NAME "HTTP Digest access authentication" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 32 #define CIPHERTEXT_LENGTH 32 #define BINARY_SIZE 16 #define BINARY_ALIGN 4 #define SALT_SIZE sizeof(reqinfo_t) #define SALT_ALIGN 4 #if defined(_OPENMP) static unsigned int omp_t = 1; #ifdef SIMD_COEF_32 #ifndef OMP_SCALE #define OMP_SCALE 256 #endif #else #ifndef OMP_SCALE #define OMP_SCALE 64 #endif #endif #endif #ifdef SIMD_COEF_32 #define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5) #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&60)*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 ) #define GETOUTPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&0x1c)*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32 ) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #define SEPARATOR '$' #define FORMAT_TAG "$response$" #define TAG_LENGTH (sizeof(FORMAT_TAG)-1) #define SIZE_TAB 12 // This is 8 x 64 
bytes, so in MMX/SSE2 we support up to 9 limbs of MD5 #define HTMP 512 typedef struct { size_t h1tmplen; size_t h3tmplen; char h1tmp[HTMP]; char h3tmp[HTMP]; } reqinfo_t; /* digest authentication scheme : h1 = md5(user:realm:password) h2 = md5(method:digestURI) response = h3 = md5(h1:nonce:nonceCount:ClientNonce:qop:h2) */ /* request information */ enum e_req { R_RESPONSE, R_USER, R_REALM, R_METHOD, R_URI, R_NONCE, R_NONCECOUNT, R_CLIENTNONCE, R_QOP }; /* response:user:realm:method:uri:nonce:nonceCount:ClientNonce:qop */ static struct fmt_tests tests[] = { {"$response$679066476e67b5c7c4e88f04be567f8b$user$myrealm$GET$/$8c12bd8f728afe56d45a0ce846b70e5a$00000001$4b61913cec32e2c9$auth", "nocode"}, {"$response$faa6cb7d676e5b7c17fcbf966436aa0c$moi$myrealm$GET$/$af32592775d27b1cd06356b3a0db9ddf$00000001$8e1d49754a25aea7$auth", "kikou"}, {"$response$56940f87f1f53ade8b7d3c5a102c2bf3$usrx$teN__chars$GET$/4TLHS1TMN9cfsbqSUAdTG3CRq7qtXMptnYfn7mIIi3HRKOMhOks56e$2c0366dcbc$00000001$0153$auth", "passWOrd"}, {"$response$8663faf2337dbcb2c52882807592ec2c$user$myrealm$GET$/$8c12bd8f728afe56d45a0ce846b70e5a$", "pass"}, {"$response$8663faf2337dbcb2c52882807592ec2c$user$myrealm$GET$/$8c12bd8f728afe56d45a0ce846b70e5a", "pass"}, {NULL} }; /* used by set_key */ static char (*saved_plain)[PLAINTEXT_LENGTH + 1]; #ifdef SIMD_COEF_32 #define LIMBS 9 static unsigned char *saved_key[LIMBS]; static unsigned int *interm_key; static unsigned int *crypt_key; #else static int (*saved_len); static unsigned char (*crypt_key)[BINARY_SIZE]; #endif /* Store information about the request ()*/ static reqinfo_t *rinfo = NULL; static void init(struct fmt_main *self) { #ifdef SIMD_COEF_32 int i; #endif #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif #ifdef SIMD_COEF_32 for (i = 0; i < LIMBS; i++) saved_key[i] = mem_calloc_align(self->params.max_keys_per_crypt, 64, MEM_ALIGN_SIMD); interm_key = 
mem_calloc_align(self->params.max_keys_per_crypt, 16, MEM_ALIGN_SIMD); crypt_key = mem_calloc_align(self->params.max_keys_per_crypt, 16, MEM_ALIGN_SIMD); #else saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key)); #endif saved_plain = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_plain)); } static void done(void) { #ifdef SIMD_COEF_32 int i; #endif MEM_FREE(saved_plain); MEM_FREE(crypt_key); #ifdef SIMD_COEF_32 MEM_FREE(interm_key); for (i = 0; i < LIMBS; i++) MEM_FREE(saved_key[i]); #else MEM_FREE(saved_len); #endif } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += TAG_LENGTH; if ((p = strtokm(ctcopy, "$")) == NULL) /* hash */ goto err; if (!ishexlc(p) || strlen(p) != 32) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* user */ goto err; if ((p = strtokm(NULL, "$")) == NULL) /* realm */ goto err; if ((p = strtokm(NULL, "$")) == NULL) /* method */ goto err; if ((p = strtokm(NULL, "$")) == NULL) /* uri */ goto err; if ((p = strtokm(NULL, "$")) == NULL) /* nonce */ goto err; if ((p = strtokm(NULL, "$")) == NULL) /* End of legacy HDAA or noncecount */ goto end_hdaa_legacy; if ((p = strtokm(NULL, "$")) == NULL) /* clientnonce */ goto err; if (!ishexlc(p) ) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* qop */ goto err; if ((p = strtokm(NULL, "$")) != NULL) goto err; end_hdaa_legacy: MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } // Normalize shorter hashes, to allow with or without trailing '$' character. 
static char *split(char *ciphertext, int index, struct fmt_main *self) { char *cp; if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) return ciphertext; cp = ciphertext + TAG_LENGTH; cp = strchr(cp, '$'); if (!cp) return ciphertext; cp = strchr(cp+1, '$'); if (!cp) return ciphertext; cp = strchr(cp+1, '$'); if (!cp) return ciphertext; cp = strchr(cp+1, '$'); if (!cp) return ciphertext; cp = strchr(cp+1, '$'); if (!cp) return ciphertext; // now if we have $binary_hash$ then we remove the last '$' char if (strlen(cp) == 1 + BINARY_SIZE*2 + 1) { static char out[256]; strnzcpy(out, ciphertext, sizeof(out)); out[strlen(out)-1] = 0; return out; } return ciphertext; } static void set_salt(void *salt) { rinfo = salt; } static void set_key(char *key, int index) { strcpy(saved_plain[index], key); #ifndef SIMD_COEF_32 saved_len[index] = -1; #endif } static char *get_key(int index) { return saved_plain[index]; } static int cmp_all(void *binary, int count) { #ifdef SIMD_COEF_32 unsigned int x,y=0; #ifdef _OPENMP for (; y < SIMD_PARA_MD5 * omp_t; y++) #else for (; y < SIMD_PARA_MD5; y++) #endif for (x = 0; x < SIMD_COEF_32; x++) { if ( ((uint32_t*)binary)[0] == ((uint32_t*)crypt_key)[y*SIMD_COEF_32*4+x] ) return 1; } return 0; #else int index; for (index = 0; index < count; index++) if (!(memcmp(binary, crypt_key[index], BINARY_SIZE))) return 1; return 0; #endif } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_32 unsigned int i,x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; for (i=0;i<(BINARY_SIZE/4);i++) if ( ((uint32_t*)binary)[i] != ((uint32_t*)crypt_key)[y*SIMD_COEF_32*4+i*SIMD_COEF_32+x] ) return 0; return 1; #else return !(memcmp(binary, crypt_key[index], BINARY_SIZE)); #endif } static int cmp_exact(char *source, int index) { return 1; } /* convert hash from binary to ascii */ #ifdef SIMD_COEF_32 // This code should be rewritten in intrinsics, reading from // MMX or SSE2 output buffers and writing to MMX/SSE2 input buffers. 
inline static void sse_bin2ascii(unsigned char *conv, unsigned char *src) { unsigned int index; for (index = 0; index < NBKEYS; index++) { unsigned int i, j = 0; for (i = 0; i < BINARY_SIZE; i += 2) { unsigned int t; t = (src[GETOUTPOS((i + 1), index)] & 0x0f); t <<= 12; t |= (src[GETOUTPOS((i + 1), index)] & 0xf0); t <<= 4; t |= (src[GETOUTPOS(i, index)] & 0x0f); t <<= 8; t |= ((src[GETOUTPOS(i, index)] & 0xf0) >> 4); t += 0x06060606; t += ((((t >> 4) & 0x01010101) * 0x27) + 0x2a2a2a2a); *(unsigned int*)&conv[GETPOS(j, index)] = t; j+=4; } } } #endif /* SIMD_COEF_32 */ #ifdef __MMX__ inline static void bin2ascii(__m64 *conv, __m64 *src) { unsigned int i = 0; while (i != 4) { __m64 l; __m64 r; __m64 t; __m64 u; __m64 v; /* 32 bits to 64 bits */ t = _mm_set1_pi32(0x0f0f0f0f); /* Bit-wise AND the 64-bit values in M1 and M2. */ u = _mm_and_si64(_mm_srli_si64(src[(i / 2)], 4), t); v = _mm_and_si64(src[(i / 2)], t); /* interleaving */ l = _mm_unpacklo_pi8(u, v); r = _mm_unpackhi_pi8(u, v); t = _mm_set1_pi32(0x06060606); l = _mm_add_pi32(l, t); r = _mm_add_pi32(r, t); t = _mm_set1_pi32(0x01010101); /* u = (l << 4) & t */ u = _mm_and_si64(_mm_srli_si64(l, 4), t); /* v = (r << 4) & t */ v = _mm_and_si64(_mm_srli_si64(r, 4), t); t = _mm_set1_pi32(0x00270027); /* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce the low 16 bits of the results. 
*/ u = _mm_mullo_pi16(u, t); v = _mm_mullo_pi16(v, t); t = _mm_set1_pi32(0x2a2a2a2a); u = _mm_add_pi32(u, t); v = _mm_add_pi32(v, t); conv[(i++)] = _mm_add_pi32(l, u); conv[(i++)] = _mm_add_pi32(r, v); } __asm__ __volatile__("emms"); } #else inline static void bin2ascii(uint32_t *conv, uint32_t *source) { unsigned char *src = (unsigned char*)source; unsigned int i; unsigned int j = 0; uint32_t t = 0; for (i = 0; i < BINARY_SIZE; i += 2) { #if (ARCH_LITTLE_ENDIAN == 0) t = (src[i] & 0xf0); t *= 0x10; t += (src[i] & 0x0f); t *= 0x1000; t += (src[(i + 1)] & 0xf0); t *= 0x10; t += (src[(i + 1)] & 0x0f); #else t = (src[(i + 1)] & 0x0f); t *= 0x1000; t += (src[(i + 1)] & 0xf0); t *= 0x10; t += (src[i] & 0x0f); t *= 0x100; t += ((src[i] & 0xf0) >> 4); #endif t += 0x06060606; t += ((((t >> 4) & 0x01010101) * 0x27) + 0x2a2a2a2a); conv[(j++)] = t; } } #endif /* MMX */ #if SIMD_COEF_32 inline static void crypt_done(unsigned const int *source, unsigned int *dest, int index) { unsigned int i; unsigned const int *s = &source[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*4*SIMD_COEF_32]; unsigned int *d = &dest[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*4*SIMD_COEF_32]; for (i = 0; i < BINARY_SIZE / 4; i++) { *d = *s; s += SIMD_COEF_32; d += SIMD_COEF_32; } } #endif static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; #if SIMD_COEF_32 #if defined(_OPENMP) #define ti (thread*NBKEYS+index) int thread; #pragma omp parallel for for (thread = 0; thread < (count+NBKEYS-1)/NBKEYS; thread++) #else #define thread 0 #define ti index #endif { static unsigned int crypt_len[NBKEYS]; unsigned int index, i, shortest, longest; for (index = 0; index < NBKEYS; index++) { int len; char temp; const char *key; key = rinfo->h1tmp; for (len = 0; len < rinfo->h1tmplen; len += 4, key += 4) *(uint32_t*)&saved_key[len>>6][GETPOS(len, ti)] = *(uint32_t*)key; len = rinfo->h1tmplen; key = (char*)&saved_plain[ti]; while((temp = *key++)) { 
saved_key[len>>6][GETPOS(len, ti)] = temp; len++; } saved_key[len>>6][GETPOS(len, ti)] = 0x80; // Clean rest of this buffer i = len; while (++i & 3) saved_key[i>>6][GETPOS(i, ti)] = 0; for (; i < (((len+8)>>6)+1)*64; i += 4) *(uint32_t*)&saved_key[i>>6][GETPOS(i, ti)] = 0; ((unsigned int *)saved_key[(len+8)>>6])[14*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + (ti/SIMD_COEF_32)*16*SIMD_COEF_32] = len << 3; } SIMDmd5body(&saved_key[0][thread*64*NBKEYS], &crypt_key[thread*4*NBKEYS], NULL, SSEi_MIXED_IN); sse_bin2ascii((unsigned char*)&saved_key[0][thread*64*NBKEYS], (unsigned char*)&crypt_key[thread*4*NBKEYS]); longest = 0; shortest = HTMP; for (index = 0; index < NBKEYS; index++) { const char *key; int i, len; len = CIPHERTEXT_LENGTH - 1; key = rinfo->h3tmp + CIPHERTEXT_LENGTH; // Copy a char at a time until aligned at destination while (++len & 3) saved_key[len>>6][GETPOS(len, ti)] = *key++; // ...then a word at a time. This is a good boost, we are copying over 100 bytes. for (;len < rinfo->h3tmplen; len += 4, key += 4) *(uint32_t*)&saved_key[len>>6][GETPOS(len, ti)] = *(uint32_t*)key; len = rinfo->h3tmplen; saved_key[len>>6][GETPOS(len, ti)] = 0x80; // Clean rest of this buffer i = len; while (++i & 3) saved_key[i>>6][GETPOS(i, ti)] = 0; //for (; i < (((len+8)>>6)+1)*64; i += 4) for (; i <= crypt_len[index]; i += 4) *(uint32_t*)&saved_key[i>>6][GETPOS(i, ti)] = 0; ((unsigned int *)saved_key[(len+8)>>6])[14*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + (ti/SIMD_COEF_32)*16*SIMD_COEF_32] = len << 3; crypt_len[index] = len; if (len > longest) longest = len; if (len < shortest) shortest = len; } // First limb SIMDmd5body(&saved_key[0][thread*64*NBKEYS], &interm_key[thread*4*NBKEYS], NULL, SSEi_MIXED_IN); // Copy any output that is done now if (shortest < 56) { if (longest < 56) memcpy(&crypt_key[thread*4*NBKEYS], &interm_key[thread*4*NBKEYS], 16*NBKEYS); else for (index = 0; index < NBKEYS; index++) if (crypt_len[index] < 56) crypt_done(interm_key, crypt_key, ti); } // Do the rest 
of the limbs for (i = 1; i < (((longest + 8) >> 6) + 1); i++) { SIMDmd5body(&saved_key[i][thread*64*NBKEYS], &interm_key[thread*4*NBKEYS], &interm_key[thread*4*NBKEYS], SSEi_RELOAD|SSEi_MIXED_IN); // Copy any output that is done now if (shortest < i*64+56) { if (shortest > (i-1)*64+55 && longest < i*64+56) memcpy(&crypt_key[thread*4*NBKEYS], &interm_key[thread*4*NBKEYS], 16*NBKEYS); else for (index = 0; index < NBKEYS; index++) if (((crypt_len[index] + 8) >> 6) == i) crypt_done(interm_key, crypt_key, ti); } } } #undef thread #undef ti #else int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { MD5_CTX ctx; int len; #ifdef _OPENMP char h3tmp[HTMP]; char h1tmp[HTMP]; #else char *h3tmp; char *h1tmp; #endif size_t tmp; #ifdef __MMX__ __m64 h1[BINARY_SIZE / sizeof(__m64)]; __m64 conv[CIPHERTEXT_LENGTH / sizeof(__m64) + 1]; #else uint32_t h1[BINARY_SIZE / sizeof(uint32_t)]; uint32_t conv[(CIPHERTEXT_LENGTH / sizeof(uint32_t)) + 1]; #endif tmp = rinfo->h1tmplen; if ((len = saved_len[index]) < 0) len = saved_len[index] = strlen(saved_plain[index]); #ifdef _OPENMP memcpy(h1tmp, rinfo->h1tmp, tmp); memcpy(h3tmp + CIPHERTEXT_LENGTH, rinfo->h3tmp + CIPHERTEXT_LENGTH, rinfo->h3tmplen - CIPHERTEXT_LENGTH); #else h3tmp = rinfo->h3tmp; h1tmp = rinfo->h1tmp; #endif memcpy(&h1tmp[tmp], saved_plain[index], len); MD5_Init(&ctx); MD5_Update(&ctx, h1tmp, len + tmp); MD5_Final((unsigned char*)h1, &ctx); bin2ascii(conv, h1); memcpy(h3tmp, conv, CIPHERTEXT_LENGTH); MD5_Init(&ctx); MD5_Update(&ctx, h3tmp, rinfo->h3tmplen); MD5_Final(crypt_key[index], &ctx); } #endif return count; } static char *mystrndup(const char *s, size_t n) { size_t tmp; size_t size; char *ret; for (tmp = 0; s[tmp] != 0 && tmp <= n; tmp++); size = n; if (tmp < size) size = tmp; if ((ret = mem_alloc(sizeof(char) * size + 1)) == NULL) return NULL; memmove(ret, s, size); ret[size] = 0; return ret; } static size_t reqlen(char *str) { size_t len; for (len = 0; str[len] != 0 
&& str[len] != SEPARATOR; len++); return len; } static void *get_salt(char *ciphertext) { int nb; int i; char *request[SIZE_TAB]; char *str; static reqinfo_t *r; #ifdef __MMX__ __m64 h2[BINARY_SIZE / sizeof(__m64)]; __m64 conv[CIPHERTEXT_LENGTH / sizeof(__m64) + 1]; #else unsigned int h2[BINARY_SIZE / sizeof(unsigned int)]; uint32_t conv[(CIPHERTEXT_LENGTH / sizeof(uint32_t)) + 1]; #endif MD5_CTX ctx; /* parse the password string */ if (!r) r = mem_alloc_tiny(sizeof(*r), MEM_ALIGN_WORD); memset(r, 0, sizeof(*r)); for (nb = 0, i = 1; ciphertext[i] != 0; i++) { if (ciphertext[i] == SEPARATOR) { i++; request[nb] = mystrndup(&ciphertext[i], reqlen(&ciphertext[i])); nb++; if (!ciphertext[i]) break; } } while (nb < SIZE_TAB) { request[nb++] = NULL; } /* calculate h2 (h2 = md5(method:digestURI))*/ str = mem_alloc(strlen(request[R_METHOD]) + strlen(request[R_URI]) + 2); sprintf(str, "%s:%s", request[R_METHOD], request[R_URI]); MD5_Init(&ctx); MD5_Update(&ctx, str, strlen(str)); MD5_Final((unsigned char*)h2, &ctx); memset(conv, 0, CIPHERTEXT_LENGTH + 1); bin2ascii(conv, h2); MEM_FREE(str); /* create a part of h1 (h1tmp = request:realm:)*/ snprintf(r->h1tmp, HTMP - PLAINTEXT_LENGTH, "%s:%s:", request[R_USER], request[R_REALM]); /* create a part of h3 (h3tmp = nonce:noncecount:clientnonce:qop:h2)*/ if (request[R_CLIENTNONCE] == NULL) snprintf(&r->h3tmp[CIPHERTEXT_LENGTH], HTMP - CIPHERTEXT_LENGTH, ":%s:%s", request[R_NONCE], (char*)conv); else snprintf(&r->h3tmp[CIPHERTEXT_LENGTH], HTMP - CIPHERTEXT_LENGTH, ":%s:%s:%s:%s:%s", request[R_NONCE], request[R_NONCECOUNT], request[R_CLIENTNONCE], request[R_QOP], (char*)conv); r->h1tmplen = strlen(r->h1tmp); r->h3tmplen = strlen(&r->h3tmp[CIPHERTEXT_LENGTH]) + CIPHERTEXT_LENGTH; for (nb=0; nb < SIZE_TAB; ++nb) { MEM_FREE(request[nb]); } return r; } /* convert response to binary form */ static void *get_binary(char *ciphertext) { static unsigned int realcipher[BINARY_SIZE / sizeof(int)]; int i; ciphertext += TAG_LENGTH; for (i = 0; i 
< BINARY_SIZE; i++) { ((unsigned char*)realcipher)[i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] * 16 + atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])]; } return (void*) realcipher; } #ifdef SIMD_COEF_32 #define HASH_OFFSET (index&(SIMD_COEF_32-1))+((unsigned int)index/SIMD_COEF_32)*SIMD_COEF_32*4 static int get_hash_0(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_0; } static int get_hash_1(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_1; } static int get_hash_2(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_2; } static int get_hash_3(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_3; } static int get_hash_4(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_4; } static int get_hash_5(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_5; } static int get_hash_6(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_6; } #else static int get_hash_0(int index) { return *(uint32_t*)&crypt_key[index] & PH_MASK_0; } static int get_hash_1(int index) { return *(uint32_t*)&crypt_key[index] & PH_MASK_1; } static int get_hash_2(int index) { return *(uint32_t*)&crypt_key[index] & PH_MASK_2; } static int get_hash_3(int index) { return *(uint32_t*)&crypt_key[index] & PH_MASK_3; } static int get_hash_4(int index) { return *(uint32_t*)&crypt_key[index] & PH_MASK_4; } static int get_hash_5(int index) { return *(uint32_t*)&crypt_key[index] & PH_MASK_5; } static int get_hash_6(int index) { return *(uint32_t*)&crypt_key[index] & PH_MASK_6; } #endif struct fmt_main fmt_HDAA = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, get_binary, get_salt, { NULL }, 
fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
test_encap_decap.c
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Encapsulate a secret and use the secret to encrypt a message Decapsulate the secret and use the secret to decrypt the encrypted message */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <amcl/utils.h> #include <amcl/randapi.h> #include <amcl/bls_BLS381.h> #include <oqs/oqs.h> #include <pqnist/pqnist.h> #define NTHREADS 8 #define MAXSIZE 256 #define G2LEN 4*BFS_BLS381 int main() { int i,rc; // Seed value for CSPRNG char seed[PQNIST_SEED_LENGTH]; octet SEED = {sizeof(seed),sizeof(seed),seed}; // Seed value for key generation char seedkeys[NTHREADS][PQNIST_SEED_LENGTH]; csprng RNG; // Initialization vector char iv[PQNIST_AES_IV_LENGTH]; octet IV= {sizeof(iv),sizeof(iv),iv}; // Message to be sent to Bob char p[NTHREADS][MAXSIZE]; octet P[NTHREADS]; // AES CBC ciphertext char c[NTHREADS][MAXSIZE]; octet C[NTHREADS]; // non random seed value for (i=0; i<32; i++) SEED.val[i]=i+1; printf("SEED: "); OCT_output(&SEED); printf("\n"); // initialise random number generator CREATE_CSPRNG(&RNG,&SEED); // Initialise key generation seed for(i=0; i<NTHREADS; i++) { for(int j=0; j<PQNIST_SEED_LENGTH; j++) { seedkeys[i][j] = i; } } // Bob's SIKE keys uint8_t SIKEpk[NTHREADS][OQS_KEM_sike_p751_length_public_key]; uint8_t 
SIKEsk[NTHREADS][OQS_KEM_sike_p751_length_secret_key]; // Alice's BLS keys (not used) char BLSpk[NTHREADS][G2LEN]; char BLSsk[NTHREADS][BGS_BLS381]; #pragma omp parallel for for(i=0; i<NTHREADS; i++) { rc = pqnist_keys(seedkeys[i], SIKEpk[i], SIKEsk[i], BLSpk[i], BLSsk[i]); if (rc) { fprintf(stderr, "FAILURE pqnist_keys rc: %d\n", rc); OQS_MEM_cleanse(SIKEsk[i], OQS_KEM_sike_p751_length_secret_key); exit(EXIT_FAILURE); } int j = OQS_KEM_sike_p751_length_public_key; printf("Bob SIKE pklen %d pk: ", j); amcl_print_hex(SIKEpk[i], j); j = OQS_KEM_sike_p751_length_secret_key; printf("Bob SIKE sklen %d sk: ", j); amcl_print_hex(SIKEsk[i], j); } // Alice for(i=0; i<NTHREADS; i++) { bzero(p[i],sizeof(p[i])); P[i].max = MAXSIZE; P[i].len = sprintf(p[i], "Hello Bob! This is a message from Alice %d", i); P[i].val = p[i]; // Pad message int l = 16 - (P[i].len % 16); if (l < 16) { OCT_jbyte(&P[i],0,l); } } // Random initialization value generateRandom(&RNG,&IV); printf("Alice IV: "); OCT_output(&IV); // Copy plaintext for(i=0; i<NTHREADS; i++) { C[i].val = c[i]; C[i].max = MAXSIZE; OCT_copy(&C[i],&P[i]); printf("Alice Plaintext: "); OCT_output_string(&C[i]); printf("\n"); } // SIKE encapsulated key uint8_t ek[NTHREADS][OQS_KEM_sike_p751_length_ciphertext]; #pragma omp parallel for for(i=0; i<NTHREADS; i++) { // Generate an AES which is ecapsulated using SIKE. Use this key to // AES encrypt the K parameter. 
rc = pqnist_encapsulate_encrypt(C[i].val, C[i].len, IV.val, SIKEpk[i], ek[i]); if(rc) { fprintf(stderr, "FAILURE pqnist_encapsulate_encrypt rc: %d\n", rc); exit(EXIT_FAILURE); } printf("Alice ciphertext: "); OCT_output(&C[i]); printf("Alice ek %lu ek: ", sizeof(ek[i])); amcl_print_hex(ek[i], sizeof(ek[i])); printf("\n"); } // Bob #pragma omp parallel for for(i=0; i<NTHREADS; i++) { // Obtain encapsulated AES key and decrypt C rc = pqnist_decapsulate_decrypt(C[i].val, C[i].len, IV.val, SIKEsk[i], ek[i]); if(rc) { fprintf(stderr, "FAILURE pqnist_decapsulate_decrypt rc: %d\n", rc); exit(EXIT_FAILURE); } printf("Bob Plaintext: "); OCT_output(&C[i]); printf("Bob Plaintext: "); OCT_output_string(&C[i]); printf("\n"); // Compare sent and recieved message (returns 0 for failure) rc = OCT_comp(&P[i],&C[i]); if(!rc) { fprintf(stderr, "FAILURE OCT_comp rc: %d\n", rc); exit(EXIT_FAILURE); } } // clear memory OCT_clear(&IV); for(i=0; i<NTHREADS; i++) { OQS_MEM_cleanse(SIKEsk[i], OQS_KEM_sike_p751_length_secret_key); OCT_clear(&P[i]); OCT_clear(&C[i]); } KILL_CSPRNG(&RNG); printf("SUCCESS\n"); exit(EXIT_SUCCESS); }
GB_unaryop__identity_uint64_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__identity_uint64_uint32
// op(A') function: GB_tran__identity_uint64_uint32

// C type: uint64_t
// A type: uint32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij

// type of the entries of the input matrix A
#define GB_ATYPE \
    uint32_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// p-th entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator (identity: z is assigned x unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting from the A type to the C type
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity operator to all anz entries of Ax, casting each
// uint32_t entry to uint64_t and storing it in Cx.  The loop has no
// cross-iteration dependence, so it is split statically across nthreads
// OpenMP threads.  Returns GrB_NO_VALUE when this kernel is disabled at
// compile time (GB_DISABLE), which tells the caller to fall back to the
// generic worker.
GrB_Info GB_unop__identity_uint64_uint32
(
    uint64_t *restrict Cx,          // output array, anz entries
    const uint32_t *restrict Ax,    // input array, anz entries
    int64_t anz,                    // number of entries to process
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transposes A into C while typecasting and applying the identity operator.
// The actual work is performed by the included template
// GB_unaryop_transpose.c (phase 2 of 2), driven by the per-slice workspace
// in Rowcounts and the slice boundaries in A_slice.
GrB_Info GB_tran__identity_uint64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,    // per-slice row count workspace
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice, // slice boundaries, naslice+1 entries
    int naslice                      // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__identity_fp32_int64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__identity_fp32_int64)
// op(A') function: GB (_unop_tran__identity_fp32_int64)

// C type: float
// A type: int64_t
// cast: float cij = (float) aij
// unaryop: cij = aij

// type of the entries of the input matrix A
#define GB_ATYPE \
    int64_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

// p-th entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator (identity: z is assigned x unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting from the A type to the C type
#define GB_CAST(z, aij) \
    float z = (float) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = (float) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Casts all anz entries of Ax from int64_t to float and stores them in Cx.
// Two cases: Ab == NULL (every position 0..anz-1 holds an entry) and the
// bitmap case, where only positions with Ab [p] != 0 are converted.  Work is
// split statically over nthreads OpenMP threads.  Returns GrB_NO_VALUE when
// the kernel is disabled at compile time (GB_DISABLE), which tells the
// caller to fall back to the generic worker.
GrB_Info GB (_unop_apply__identity_fp32_int64)
(
    float *Cx,                  // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // no bitmap: every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transposes A into C while typecasting int64_t to float.  The actual work
// is performed by the included template GB_unop_transpose.c, driven by the
// Workspaces and A_slice partitioning computed by the caller.
GrB_Info GB (_unop_tran__identity_fp32_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // per-workspace buffers for the transpose
    const int64_t *restrict A_slice, // slice boundaries of A
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
H2Pack_utils.c
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <assert.h> #include <math.h> #include <omp.h> #include "H2Pack_config.h" #include "H2Pack_aux_structs.h" #include "H2Pack_utils.h" #include "utils.h" // Check if two boxes are admissible int H2P_check_box_admissible(const DTYPE *box0, const DTYPE *box1, const int pt_dim, const DTYPE alpha) { for (int i = 0; i < pt_dim; i++) { // Radius of each box's i-th dimension DTYPE r0 = box0[pt_dim + i]; DTYPE r1 = box1[pt_dim + i]; // Center of each box's i-th dimension DTYPE c0 = box0[i] + 0.5 * r0; DTYPE c1 = box1[i] + 0.5 * r1; DTYPE min_r = MIN(r0, r1); DTYPE dist = DABS(c0 - c1); if (dist >= alpha * min_r + 0.5 * (r0 + r1)) return 1; } return 0; } // Gather some columns from a matrix to another matrix void H2P_gather_matrix_columns( DTYPE *src_mat, const int src_ld, DTYPE *dst_mat, const int dst_ld, const int nrow, int *col_idx, const int ncol ) { for (int irow = 0; irow < nrow; irow++) { DTYPE *src_row = src_mat + src_ld * irow; DTYPE *dst_row = dst_mat + dst_ld * irow; for (int icol = 0; icol < ncol; icol++) dst_row[icol] = src_row[col_idx[icol]]; } } // Evaluate a kernel matrix with OpenMP parallelization void H2P_eval_kernel_matrix_OMP( const void *krnl_param, kernel_eval_fptr krnl_eval, const int krnl_dim, H2P_dense_mat_p x_coord, H2P_dense_mat_p y_coord, H2P_dense_mat_p kernel_mat ) { const int nx = x_coord->ncol; const int ny = y_coord->ncol; const int nrow = nx * krnl_dim; const int ncol = ny * krnl_dim; H2P_dense_mat_resize(kernel_mat, nrow, ncol); #pragma omp parallel { int tid = omp_get_thread_num(); int nt = omp_get_num_threads(); int nx_blk_start, nx_blk_len; calc_block_spos_len(nx, nt, tid, &nx_blk_start, &nx_blk_len); DTYPE *kernel_mat_srow = kernel_mat->data + nx_blk_start * krnl_dim * kernel_mat->ld; DTYPE *x_coord_spos = x_coord->data + nx_blk_start; krnl_eval( x_coord_spos, x_coord->ncol, nx_blk_len, y_coord->data, y_coord->ncol, ny, krnl_param, kernel_mat_srow, kernel_mat->ld ); } } 
// Check if a coordinate is in box [-L/2, L/2]^pt_dim
// Returns 1 if coord[0..pt_dim-1] all lie inside the box, 0 otherwise.
int H2P_point_in_box(const int pt_dim, DTYPE *coord, DTYPE L)
{
    int res = 1;
    DTYPE semi_L = L * 0.5;
    for (int i = 0; i < pt_dim; i++)
    {
        DTYPE coord_i = coord[i];
        if ((coord_i < -semi_L) || (coord_i > semi_L))
        {
            res = 0;
            break;
        }
    }
    return res;
}

// Generate npt uniformly distributed random points in a ring
// [-L1/2, L1/2]^pt_dim excluding [-L0/2, L0/2]^pt_dim
// Output is column-major per dimension: coord[j * ldc + i] is the j-th
// coordinate of the i-th point.  Uses rejection sampling, so L0 should be
// strictly smaller than L1 or the inner while loop never terminates.
void H2P_gen_coord_in_ring(const int npt, const int pt_dim, const DTYPE L0, const DTYPE L1, DTYPE *coord, const int ldc)
{
    const DTYPE semi_L1 = 0.5 * L1;
    DTYPE coord_i[8];
    ASSERT_PRINTF(pt_dim <= 8, "Temporary array too small (8 < %d)\n", pt_dim);
    for (int i = 0; i < npt; i++)
    {
        int flag = 0;
        while (flag == 0)
        {
            // Draw a candidate uniformly from [-L1/2, L1/2]^pt_dim
            for (int j = 0; j < pt_dim; j++)
                coord_i[j] = (DTYPE) drand48() * L1 - semi_L1;
            // Accept only if inside the outer box but outside the inner box
            if ((H2P_point_in_box(pt_dim, coord_i, L1) == 1) &&
                (H2P_point_in_box(pt_dim, coord_i, L0) == 0))
            {
                flag = 1;
                for (int j = 0; j < pt_dim; j++)
                    coord[j * ldc + i] = coord_i[j];
            }
        }
    }
}

// Generate a random sparse matrix A for calculating y^T := A^T * x^T
// A is stored in CSR inside A_valbuf (values) and A_idxbuf (row_ptr followed
// by col_idx); each of the n rows gets min(max_nnz_col, k) distinct random
// column indices in [0, k) with values +1 or -1.
void H2P_gen_rand_sparse_mat_trans(
    const int max_nnz_col, const int k, const int n, 
    H2P_dense_mat_p A_valbuf, H2P_int_vec_p A_idxbuf
)
{
    // Note: we calculate y^T := A^T * x^T. Since x/y is row-major, 
    // each of its row is a column of x^T/y^T. We can just use SpMV
    // to calculate y^T(:, i) := A^T * x^T(:, i). 
    int rand_nnz_col = (max_nnz_col <= k) ? max_nnz_col : k;
    int nnz = n * rand_nnz_col;
    H2P_dense_mat_resize(A_valbuf, 1, nnz);
    // Layout of A_idxbuf->data: [row_ptr (n+1) | col_idx (nnz) | flag (k)]
    H2P_int_vec_set_capacity(A_idxbuf, (n + 1) + nnz + k);
    DTYPE *val = A_valbuf->data;
    int *row_ptr = A_idxbuf->data;
    int *col_idx = row_ptr + (n + 1);
    int *flag = col_idx + nnz;
    memset(flag, 0, sizeof(int) * k);
    // Random +1 / -1 values
    for (int i = 0; i < nnz; i++) 
        val[i] = (DTYPE) (2.0 * (rand() & 1) - 1.0);
    // Every row has exactly rand_nnz_col non-zeros
    for (int i = 0; i <= n; i++) 
        row_ptr[i] = i * rand_nnz_col;
    for (int i = 0; i < n; i++)
    {
        int cnt = 0;
        int *row_i_cols = col_idx + i * rand_nnz_col;
        // Draw distinct column indices; flag[] marks those already used
        while (cnt < rand_nnz_col)
        {
            int col = rand() % k;
            if (flag[col] == 0)
            {
                flag[col] = 1;
                row_i_cols[cnt] = col;
                cnt++;
            }
        }
        // Reset only the flags this row set (cheaper than a full memset)
        for (int j = 0; j < rand_nnz_col; j++)
            flag[row_i_cols[j]] = 0;
    }
    A_idxbuf->length = (n + 1) + nnz;
}

// Calculate y^T := A^T * x^T, where A is a sparse matrix, x and y are row-major matrices
// x is m-by-k (leading dimension ldx), y is m-by-n (leading dimension ldy);
// A comes from H2P_gen_rand_sparse_mat_trans in A_valbuf / A_idxbuf.
void H2P_calc_sparse_mm_trans(
    const int m, const int n, const int k,
    H2P_dense_mat_p A_valbuf, H2P_int_vec_p A_idxbuf,
    DTYPE *x, const int ldx, DTYPE *y, const int ldy
)
{
    const DTYPE *val = A_valbuf->data;
    const int *row_ptr = A_idxbuf->data;
    const int *col_idx = row_ptr + (n + 1);
    // A is k-by-n
    // Doing a naive OpenMP CSR SpMM here is good enough, using MKL SpBLAS is actually
    // slower, probably due to the cost of optimizing the storage of sparse matrix
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < m; i++)
    {
        DTYPE *x_i = x + i * ldx;
        DTYPE *y_i = y + i * ldy;
        for (int j = 0; j < n; j++)
        {
            DTYPE res = 0.0;
            #pragma omp simd
            for (int l = row_ptr[j]; l < row_ptr[j+1]; l++)
                res += val[l] * x_i[col_idx[l]];
            y_i[j] = res;
        }
    }
}

// Generate normal distribution random number, Marsaglia polar method
// Fills x[0..nelem-1] with N(mu, sigma^2) samples; each accepted (u1, u2)
// pair yields two samples, with one extra draw if nelem is odd.
// NOTE(review): nelem == 0 makes "nelem - 1" wrap around (size_t) and the
// loop would run (almost) forever -- callers must pass nelem >= 1; confirm.
void H2P_gen_normal_distribution(const DTYPE mu, const DTYPE sigma, const size_t nelem, DTYPE *x)
{
    DTYPE u1, u2, w, mult, x1, x2;
    for (size_t i = 0; i < nelem - 1; i += 2)
    {
        // Rejection step: keep (u1, u2) inside the unit disk, away from 0
        do 
        {
            u1 = (DTYPE) (drand48() * 2.0 - 1.0);
            u2 = (DTYPE) (drand48() * 2.0 - 1.0);
            w = u1 * u1 + u2 * u2;
        } while (w >= 1.0 || w <= 1e-15);
        mult = DSQRT((-2.0 * DLOG(w)) / w);
        x1 = u1 * mult;
        x2 = u2 * mult;
        x[i] = mu + sigma * x1;
        x[i+1] = mu + sigma * x2;
    }
    if (nelem % 2)
    {
        // Odd count: one more accepted pair, but only one sample is used
        do 
        {
            u1 = (DTYPE) (drand48() * 2.0 - 1.0);
            u2 = (DTYPE) (drand48() * 2.0 - 1.0);
            w = u1 * u1 + u2 * u2;
        } while (w >= 1.0 || w <= 1e-15);
        mult = DSQRT((-2.0 * DLOG(w)) / w);
        x1 = u1 * mult;
        x[nelem - 1] = mu + sigma * x1;
    }
}

// Quick sorting an integer key-value pair array by key
// In-place recursive quicksort of key[l..r] with val[] permuted alongside;
// pivot is the middle element's key.
void H2P_qsort_int_key_val(int *key, int *val, int l, int r)
{
    int i = l, j = r, tmp_key, tmp_val;
    int mid_key = key[(l + r) / 2];
    while (i <= j)
    {
        while (key[i] < mid_key) i++;
        while (key[j] > mid_key) j--;
        if (i <= j)
        {
            // Swap both the key and its associated value
            tmp_key = key[i]; key[i] = key[j]; key[j] = tmp_key;
            tmp_val = val[i]; val[i] = val[j]; val[j] = tmp_val;
            i++; j--;
        }
    }
    // Recurse on the two partitions (Hoare-style split)
    if (i < r) H2P_qsort_int_key_val(key, val, i, r);
    if (j > l) H2P_qsort_int_key_val(key, val, l, j);
}

// Convert an integer COO matrix to a CSR matrix
// row/col/val hold the nnz COO triplets; outputs go to row_ptr (nrow+1),
// col_idx (nnz), and val_ (nnz), with each row sorted by column index.
void H2P_int_COO_to_CSR(
    const int nrow, const int nnz, const int *row, const int *col, 
    const int *val, int *row_ptr, int *col_idx, int *val_
)
{
    // Get the number of non-zeros in each row
    memset(row_ptr, 0, sizeof(int) * (nrow + 1));
    for (int i = 0; i < nnz; i++) row_ptr[row[i] + 1]++;
    // Calculate the displacement of 1st non-zero in each row
    for (int i = 2; i <= nrow; i++) row_ptr[i] += row_ptr[i - 1];
    // Use row_ptr to bucket sort col[] and val[]
    // (row_ptr[r] is advanced past each placed element and fixed up below)
    for (int i = 0; i < nnz; i++)
    {
        int idx = row_ptr[row[i]];
        col_idx[idx] = col[i];
        val_[idx] = val[i];
        row_ptr[row[i]]++;
    }
    // Reset row_ptr: shift entries back by one slot to undo the advancing
    for (int i = nrow; i >= 1; i--) row_ptr[i] = row_ptr[i - 1];
    row_ptr[0] = 0;
    // Sort the non-zeros in each row according to column indices
    #pragma omp parallel for
    for (int i = 0; i < nrow; i++)
        H2P_qsort_int_key_val(col_idx, val_, row_ptr[i], row_ptr[i + 1] - 1);
}

// Get the value of integer CSR matrix element A(row, col)
// Linear scan of the row; returns 0 when the element is not stored
// (callers below rely on 0 meaning "absent").
int H2P_get_int_CSR_elem(
    const int *row_ptr, const int *col_idx, const int *val,
    const int row, const int col
)
{
    int res = 0;
    for (int i = row_ptr[row]; i < row_ptr[row + 1]; i++)
    {
        if (col_idx[i] == col) 
        {
            res = val[i];
            break;
        }
    }
    return res;
}

// Set the value of integer CSR matrix element A(row, col) to new_val
// Only existing elements can be updated; the sparsity pattern is fixed,
// so a missing element is reported as an error.
void H2P_set_int_CSR_elem(
    const int *row_ptr, const int *col_idx, int *val,
    const int row, const int col, const int new_val
)
{
    int has_element = 0;
    for (int i = row_ptr[row]; i < row_ptr[row + 1]; i++)
    {
        if (col_idx[i] == col)
        {
            val[i] = new_val;
            has_element = 1;
            break;
        }
    }
    if (has_element == 0) 
        ERROR_PRINTF("CSR matrix element (%d, %d) not found, cannot be updated\n", row, col);
}

// Get B{node0, node1} from a H2Pack structure
// The B_p2i_* CSR matrix maps a node pair to a 1-based block index; a
// negative index means the stored block is the transpose of the requested
// one, and 0 means the pair has no B block.  If BD_JIT != 0 the block is
// (re)computed on the fly with krnl_eval instead of being copied from B_data.
void H2P_get_Bij_block(H2Pack_p h2pack, const int node0, const int node1, H2P_dense_mat_p Bij)
{
    int *B_p2i_rowptr = h2pack->B_p2i_rowptr;
    int *B_p2i_colidx = h2pack->B_p2i_colidx;
    int *B_p2i_val = h2pack->B_p2i_val;
    int B_idx = H2P_get_int_CSR_elem(B_p2i_rowptr, B_p2i_colidx, B_p2i_val, node0, node1);
    int need_trans = 0, node0_ = node0, node1_ = node1;
    if (B_idx == 0)
    {
        ERROR_PRINTF("B{%d, %d} does not exist!\n", node0, node1);
        return;
    }
    if (B_idx < 0)
    {
        // Stored block is B{node1, node0}; fetch it and mark for transpose
        need_trans = 1;
        B_idx = -B_idx;
        node0_ = node1;
        node1_ = node0;
    }
    B_idx--;
    int B_nrow = h2pack->B_nrow[B_idx];
    int B_ncol = h2pack->B_ncol[B_idx];
    H2P_dense_mat_resize(Bij, B_nrow, B_ncol);
    if (h2pack->BD_JIT == 0)
    {
        // Precomputed: copy the stored block out of B_data
        copy_matrix_block(sizeof(DTYPE), B_nrow, B_ncol, h2pack->B_data + h2pack->B_ptr[B_idx], B_ncol, Bij->data, B_ncol);
    } else {
        // JIT mode: evaluate the kernel block directly
        int n_point = h2pack->n_point;
        int krnl_dim = h2pack->krnl_dim;
        int *pt_cluster = h2pack->pt_cluster;
        int *node_level = h2pack->node_level;
        int level0 = node_level[node0_];
        int level1 = node_level[node1_];
        DTYPE *coord = h2pack->coord;
        void *krnl_param = h2pack->krnl_param;
        kernel_eval_fptr krnl_eval = h2pack->krnl_eval;
        H2P_dense_mat_p *J_coord = h2pack->J_coord;
        // (1) Two nodes are of the same level, compress on both sides
        if (level0 == level1)
        {
            krnl_eval(
                J_coord[node0_]->data, J_coord[node0_]->ncol, J_coord[node0_]->ncol,
                J_coord[node1_]->data, J_coord[node1_]->ncol, J_coord[node1_]->ncol,
                krnl_param, Bij->data, J_coord[node1_]->ncol * krnl_dim
            );
        }
        // (2) node1 is a leaf node and its level is higher than node0's level, 
        //     only compress on node0's side
        if (level0 > level1)
        {
            int pt_s1 = pt_cluster[2 * node1_];
            int pt_e1 = pt_cluster[2 * node1_ + 1];
            int node1_npt = pt_e1 - pt_s1 + 1;
            krnl_eval(
                J_coord[node0_]->data, J_coord[node0_]->ncol, J_coord[node0_]->ncol,
                coord + pt_s1, n_point, node1_npt,
                krnl_param, Bij->data, node1_npt * krnl_dim
            );
        }
        // (3) node0 is a leaf node and its level is higher than node1's level, 
        //     only compress on node1's side
        if (level0 < level1)
        {
            int pt_s0 = pt_cluster[2 * node0_];
            int pt_e0 = pt_cluster[2 * node0_ + 1];
            int node0_npt = pt_e0 - pt_s0 + 1;
            krnl_eval(
                coord + pt_s0, n_point, node0_npt,
                J_coord[node1_]->data, J_coord[node1_]->ncol, J_coord[node1_]->ncol,
                krnl_param, Bij->data, J_coord[node1_]->ncol * krnl_dim
            );
        }
    }  // End of "if (h2pack->BD_JIT == 0)"
    // A negative ld signals "use this block transposed" to the consumer
    if (need_trans) Bij->ld = -Bij->ld;
}

// Get D{node0, node1} from a H2Pack structure
// Same block-index convention as H2P_get_Bij_block: D_p2i_* maps a node pair
// to a 1-based index (negative = transposed block, 0 = absent); in JIT mode
// the dense block is evaluated directly from the two nodes' point clusters.
void H2P_get_Dij_block(H2Pack_p h2pack, const int node0, const int node1, H2P_dense_mat_p Dij)
{
    int *D_p2i_rowptr = h2pack->D_p2i_rowptr;
    int *D_p2i_colidx = h2pack->D_p2i_colidx;
    int *D_p2i_val = h2pack->D_p2i_val;
    int D_idx = H2P_get_int_CSR_elem(D_p2i_rowptr, D_p2i_colidx, D_p2i_val, node0, node1);
    int need_trans = 0, node0_ = node0, node1_ = node1;
    if (D_idx == 0)
    {
        ERROR_PRINTF("D{%d, %d} does not exist!\n", node0, node1);
        return;
    }
    if (D_idx < 0)
    {
        // Stored block is D{node1, node0}; fetch it and mark for transpose
        need_trans = 1;
        D_idx = -D_idx;
        node0_ = node1;
        node1_ = node0;
    }
    D_idx--;
    int D_nrow = h2pack->D_nrow[D_idx];
    int D_ncol = h2pack->D_ncol[D_idx];
    H2P_dense_mat_resize(Dij, D_nrow, D_ncol);
    if (h2pack->BD_JIT == 0)
    {
        // Precomputed: copy the stored block out of D_data
        copy_matrix_block(sizeof(DTYPE), D_nrow, D_ncol, h2pack->D_data + h2pack->D_ptr[D_idx], D_ncol, Dij->data, D_ncol);
    } else {
        // JIT mode: evaluate the kernel over the two nodes' point clusters
        int n_point = h2pack->n_point;
        int krnl_dim = h2pack->krnl_dim;
        int *pt_cluster = h2pack->pt_cluster;
        int pt_s0 = pt_cluster[2 * node0_];
        int pt_s1 = pt_cluster[2 * node1_];
        int pt_e0 = pt_cluster[2 * node0_ + 1];
        int pt_e1 = pt_cluster[2 * node1_ + 1];
        int node0_npt = pt_e0 - pt_s0 + 1;
        int node1_npt = pt_e1 - pt_s1 + 1;
        DTYPE *coord = h2pack->coord;
        h2pack->krnl_eval(
            coord + pt_s0, n_point, node0_npt,
            coord + pt_s1, n_point, node1_npt,
            h2pack->krnl_param, Dij->data, node1_npt * krnl_dim
        );
    }  // End of "if (h2pack->BD_JIT == 0)"
    // A negative ld signals "use this block transposed" to the consumer
    if (need_trans) Dij->ld = -Dij->ld;
}

// Partition work units into multiple blocks s.t. each block has 
// approximately the same amount of work
// work_sizes[i] is the cost of unit i, total_size their sum; on return,
// blk_displs holds the block boundaries: block b covers units
// [blk_displs->data[b], blk_displs->data[b+1]).  Greedy first-fit, so at
// most n_block blocks are produced (possibly fewer).
void H2P_partition_workload(
    const int n_work, const size_t *work_sizes, const size_t total_size, 
    const int n_block, H2P_int_vec_p blk_displs
)
{
    H2P_int_vec_set_capacity(blk_displs, n_block + 1);
    blk_displs->data[0] = 0;
    // Pre-fill with n_work so unused trailing slots are valid end markers
    for (int i = 1; i < blk_displs->capacity; i++) 
        blk_displs->data[i] = n_work;
    // Target cost per block (rounded up)
    size_t blk_size = total_size / n_block + 1;
    size_t curr_blk_size = 0;
    int idx = 1;
    for (int i = 0; i < n_work; i++)
    {
        curr_blk_size += work_sizes[i];
        if (curr_blk_size >= blk_size)
        {
            // Close the current block after unit i
            blk_displs->data[idx] = i + 1;
            curr_blk_size = 0;
            idx++;
        }
    }
    if (curr_blk_size > 0)
    {
        // Remaining units form the final (smaller) block
        blk_displs->data[idx] = n_work;
        idx++;
    }
    blk_displs->length = idx;
}

// Transpose a DTYPE matrix
// src is src_nrow-by-src_ncol with leading dimension lds; dst receives the
// src_ncol-by-src_nrow transpose with leading dimension ldd.  The parallel
// strategy follows the longer matrix dimension.
void H2P_transpose_dmat(
    const int n_thread, const int src_nrow, const int src_ncol, 
    const DTYPE *src, const int lds, DTYPE *dst, const int ldd
)
{
    if (n_thread == 1)
    {
        // Serial path: one destination row (source column) at a time
        for (int i = 0; i < src_ncol; i++)
        {
            DTYPE *dst_irow = dst + i * ldd;
            for (int j = 0; j < src_nrow; j++)
                dst_irow[j] = src[j * lds + i];
        }
    } else {
        if (src_nrow > src_ncol)
        {
            // More rows than columns: parallelize over destination rows
            #pragma omp parallel for if(n_thread > 1) num_threads(n_thread)
            for (int i = 0; i < src_ncol; i++)
            {
                DTYPE *dst_irow = dst + i * ldd;
                for (int j = 0; j < src_nrow; j++)
                    dst_irow[j] = src[j * lds + i];
            }
        } else {
            // Few rows: give each thread a contiguous slice of source rows
            #pragma omp parallel num_threads(n_thread)
            {
                int tid = omp_get_thread_num();
                int blk_spos, blk_len;
                calc_block_spos_len(src_nrow, n_thread, tid, &blk_spos, &blk_len);
                for (int i = 0; i < src_ncol; i++)
                {
                    DTYPE *dst_irow = dst + i * ldd;
                    for (int j = blk_spos; j < blk_spos + blk_len; j++)
                        dst_irow[j] = src[j * lds + i];
                }
            }
        }  // End of "if (src_nrow > src_ncol)"
    }  // End of "if (n_thread == 1)"
}

// Shift the coordinates
// Adds scale * shift[i] to every coordinate in dimension i; coord stores one
// dimension per row (nrow dimensions, ncol points).
void H2P_shift_coord(H2P_dense_mat_p coord, const DTYPE *shift, const DTYPE scale)
{
    for (int i = 0; i < coord->nrow; i++)
    {
        DTYPE *coord_dim_i = coord->data + i * coord->ld;
        #pragma omp simd
        for (int j = 0; j < coord->ncol; j++) 
            coord_dim_i[j] += scale * shift[i];
    }
}
colorspace.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/property.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/utility.h"

/*
  Typedef declarations.
*/
typedef struct _TransformPacket
{
  MagickRealType
    x,
    y,
    z;
} TransformPacket;

/*
  Forward declarations.
*/
static MagickBooleanType
  TransformsRGBImage(Image *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   s R G B T r a n s f o r m I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  sRGBTransformImage() converts the reference image from sRGB to an alternate
%  colorspace.  The transformation matrices are not the standard ones: the
%  weights are rescaled to normalize the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the sRGBTransformImage method is:
%
%      MagickBooleanType sRGBTransformImage(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
% % o exception: return any errors or warnings in this structure. % */ static inline void ConvertRGBToCMY(const double red,const double green, const double blue,double *cyan,double *magenta,double *yellow) { *cyan=QuantumScale*(QuantumRange-red); *magenta=QuantumScale*(QuantumRange-green); *yellow=QuantumScale*(QuantumRange-blue); } static inline void ConvertXYZToLMS(const double x,const double y, const double z,double *L,double *M,double *S) { *L=0.7328*x+0.4296*y-0.1624*z; *M=(-0.7036*x+1.6975*y+0.0061*z); *S=0.0030*x+0.0136*y+0.9834*z; } static void ConvertRGBToLMS(const double red,const double green, const double blue,double *L,double *M,double *S) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLMS(X,Y,Z,L,M,S); } static void ConvertRGBToLab(const double red,const double green, const double blue,double *L,double *a,double *b) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLab(X,Y,Z,L,a,b); } static void ConvertRGBToLuv(const double red,const double green, const double blue,double *L,double *u,double *v) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLuv(X,Y,Z,L,u,v); } static void ConvertRGBToxyY(const double red,const double green, const double blue,double *low_x,double *low_y,double *cap_Y) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); *low_x=X/(X+Y+Z); *low_y=Y/(X+Y+Z); *cap_Y=Y; } static void ConvertRGBToYDbDr(const double red,const double green, const double blue,double *Y,double *Db,double *Dr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5; *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5; } static void ConvertRGBToYIQ(const double red,const double green, const double blue,double *Y,double *I,double *Q) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5; 
*Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5; } static void ConvertRGBToYPbPr(const double red,const double green, const double blue,double *Y,double *Pb,double *Pr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5; *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5; } static void ConvertRGBToYCbCr(const double red,const double green, const double blue,double *Y,double *Cb,double *Cr) { ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr); } static void ConvertRGBToYUV(const double red,const double green, const double blue,double *Y,double *U,double *V) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5; *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5; } static MagickBooleanType sRGBTransformImage(Image *image, const ColorspaceType colorspace,ExceptionInfo *exception) { #define sRGBTransformImageTag "RGBTransform/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo primary_info; register ssize_t i; ssize_t y; TransformPacket *x_map, *y_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(colorspace != sRGBColorspace); assert(colorspace != TransparentColorspace); assert(colorspace != UndefinedColorspace); status=MagickTrue; progress=0; switch (colorspace) { case CMYKColorspace: { PixelInfo zero; /* Convert RGB to CMYK colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertRGBToCMYK(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->type=image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType : ColorSeparationAlphaType; if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: { /* Transform image from sRGB to GRAY. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelGray(image,ClampToQuantum(GetPixelIntensity(image,q)),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from sRGB to target colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); switch (colorspace) { case CMYColorspace: { ConvertRGBToCMY(red,green,blue,&X,&Y,&Z); break; } case HCLColorspace: { ConvertRGBToHCL(red,green,blue,&X,&Y,&Z); break; } case HCLpColorspace: { ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z); break; } case HSBColorspace: { ConvertRGBToHSB(red,green,blue,&X,&Y,&Z); break; } case HSIColorspace: { ConvertRGBToHSI(red,green,blue,&X,&Y,&Z); break; } case HSLColorspace: { ConvertRGBToHSL(red,green,blue,&X,&Y,&Z); break; } case HSVColorspace: { ConvertRGBToHSV(red,green,blue,&X,&Y,&Z); break; } case HWBColorspace: { ConvertRGBToHWB(red,green,blue,&X,&Y,&Z); break; } case LabColorspace: { ConvertRGBToLab(red,green,blue,&X,&Y,&Z); break; } case LCHColorspace: case LCHabColorspace: { ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z); break; } case LCHuvColorspace: { ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z); break; } case LMSColorspace: { ConvertRGBToLMS(red,green,blue,&X,&Y,&Z); break; } case LuvColorspace: { ConvertRGBToLuv(red,green,blue,&X,&Y,&Z); break; } case xyYColorspace: { ConvertRGBToxyY(red,green,blue,&X,&Y,&Z); break; } case XYZColorspace: { 
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); break; } case YCbCrColorspace: { ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z); break; } case YDbDrColorspace: { ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z); break; } case YIQColorspace: { ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z); break; } case YPbPrColorspace: { ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z); break; } case YUVColorspace: { ConvertRGBToYUV(red,green,blue,&X,&Y,&Z); break; } default: { X=QuantumScale*red; Y=QuantumScale*green; Z=QuantumScale*blue; break; } } SetPixelRed(image,ClampToQuantum(QuantumRange*X),q); SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q); SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { #define DisplayGamma (1.0/1.7) #define FilmGamma 0.6 #define ReferenceBlack 95.0 #define ReferenceWhite 685.0 const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform RGB to Log colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+ log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/ film_gamma))/1024.0)); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) DecodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=(double) DecodePixelGamma((MagickRealType) 
GetPixelGreen(image,q)); blue=(double) DecodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q); SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))], q); SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform image from sRGB to linear RGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red; red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } 
image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) ResetMagickMemory(&primary_info,0,sizeof(primary_info)); switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B I and Q, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.33333*(double) i); y_map[i].x=(MagickRealType) (0.33334*(double) i); z_map[i].x=(MagickRealType) (0.33333*(double) i); x_map[i].y=(MagickRealType) (0.50000*(double) i); y_map[i].y=(MagickRealType) (0.00000*(double) i); z_map[i].y=(MagickRealType) (-0.50000*(double) i); x_map[i].z=(MagickRealType) (-0.25000*(double) i); y_map[i].z=(MagickRealType) (0.50000*(double) i); z_map[i].z=(MagickRealType) (-0.25000*(double) i); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.601): Y = 0.2988390*R+0.5868110*G+0.1143500*B Cb= -0.1687367*R-0.3312640*G+0.5000000*B Cr= 0.5000000*R-0.4186880*G-0.0813120*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); x_map[i].y=(MagickRealType) (-0.1687367*(double) i); y_map[i].y=(MagickRealType) (-0.331264*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].z=(MagickRealType) (-0.418688*(double) i); z_map[i].z=(MagickRealType) (-0.081312*(double) i); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.709): Y = 0.212656*R+0.715158*G+0.072186*B Cb= -0.114572*R-0.385428*G+0.500000*B Cr= 0.500000*R-0.454153*G-0.045847*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212656*(double) i); y_map[i].x=(MagickRealType) (0.715158*(double) i); z_map[i].x=(MagickRealType) (0.072186*(double) i); x_map[i].y=(MagickRealType) (-0.114572*(double) i); y_map[i].y=(MagickRealType) (-0.385428*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].z=(MagickRealType) (-0.454153*(double) i); z_map[i].z=(MagickRealType) (-0.045847*(double) i); } break; } case YCCColorspace: { /* Initialize YCC tables: Y = 0.298839*R+0.586811*G+0.114350*B C1= -0.298839*R-0.586811*G+0.88600*B C2= 0.70100*R-0.586811*G-0.114350*B YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. 
*/ primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156)); primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137)); for (i=0; i <= (ssize_t) (0.018*MaxMap); i++) { x_map[i].x=0.003962014134275617*i; y_map[i].x=0.007778268551236748*i; z_map[i].x=0.001510600706713781*i; x_map[i].y=(-0.002426619775463276)*i; y_map[i].y=(-0.004763965913702149)*i; z_map[i].y=0.007190585689165425*i; x_map[i].z=0.006927257754597858*i; y_map[i].z=(-0.005800713697502058)*i; z_map[i].z=(-0.0011265440570958)*i; } for ( ; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.2201118963486454*(1.099*i-0.099); y_map[i].x=0.4321260306242638*(1.099*i-0.099); z_map[i].x=0.08392226148409894*(1.099*i-0.099); x_map[i].y=(-0.1348122097479598)*(1.099*i-0.099); y_map[i].y=(-0.2646647729834528)*(1.099*i-0.099); z_map[i].y=0.3994769827314126*(1.099*i-0.099); x_map[i].z=0.3848476530332144*(1.099*i-0.099); y_map[i].z=(-0.3222618720834477)*(1.099*i-0.099); z_map[i].z=(-0.06258578094976668)*(1.099*i-0.099); } break; } default: { /* Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert from sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. 
*/ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register Quantum *magick_restrict q; register ssize_t x; register unsigned int blue, green, red; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelRed(image,q))); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelGreen(image,q))); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelBlue(image,q))); pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+ primary_info.x; pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+ primary_info.y; pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+ primary_info.z; SetPixelRed(image,ScaleMapToQuantum(pixel.red),q); SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q); SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_sRGBTransformImage) #endif proceed=SetImageProgress(image,sRGBTransformImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { register unsigned int blue, green, red; /* Convert PseudoClass image. 
*/ for (i=0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z; image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red); image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green); image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue); } (void) SyncImage(image,exception); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorspace() sets the colorspace member of the Image structure. % % The format of the SetImageColorspace method is: % % MagickBooleanType SetImageColorspace(Image *image, % const ColorspaceType colorspace,ExceptiionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  ImageType
    type;

  MagickBooleanType
    status;

  /*
    Tag-only operation: stamp the colorspace member and related metadata;
    no pixel data is converted here (TransformImageColorspace() does that).
  */
  if (image->colorspace == colorspace)
    return(MagickTrue);
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;  /* default: video-style 1/2.2 transfer gamma */
  (void) ResetMagickMemory(&image->chromaticity,0,sizeof(image->chromaticity));
  type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      /*
        Luminance-style intensity methods expect linear samples, so flag the
        image as linear (gamma 1.0) for those methods.
      */
      if ((image->intensity == Rec601LuminancePixelIntensityMethod) ||
          (image->intensity == Rec709LuminancePixelIntensityMethod))
        image->gamma=1.000;
      type=GrayscaleType;
    }
  else
    if ((IsRGBColorspace(colorspace) != MagickFalse) ||
        (colorspace == XYZColorspace) || (colorspace == xyYColorspace))
      image->gamma=1.000;  /* linear colorspaces carry unity gamma */
    else
      {
        /*
          Non-linear device colorspace: assign Rec.709/sRGB primaries and a
          D65 white point (x=0.3127, y=0.3290) with perceptual intent.
        */
        image->rendering_intent=PerceptualIntent;
        image->chromaticity.red_primary.x=0.6400;
        image->chromaticity.red_primary.y=0.3300;
        image->chromaticity.red_primary.z=0.0300;
        image->chromaticity.green_primary.x=0.3000;
        image->chromaticity.green_primary.y=0.6000;
        image->chromaticity.green_primary.z=0.1000;
        image->chromaticity.blue_primary.x=0.1500;
        image->chromaticity.blue_primary.y=0.0600;
        image->chromaticity.blue_primary.z=0.7900;
        image->chromaticity.white_point.x=0.3127;
        image->chromaticity.white_point.y=0.3290;
        image->chromaticity.white_point.z=0.3583;
      }
  /* Invalidate/resync the pixel cache before publishing the new type. */
  status=SyncImagePixelCache(image,exception);
  image->type=type;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   I m a g e   G r a y                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageGray() returns MagickTrue if all the pixels in the image have the
%  same red, green, and blue intensities and changes the type of the image to
%  bi-level or grayscale.
%
%  The format of the SetImageGray method is:
%
%      MagickBooleanType SetImageGray(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  const char
    *value;

  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Explicit MagickBooleanType comparison for consistency with the rest of
    the file (was a bare truth test).
  */
  if (IsImageGray(image) != MagickFalse)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /*
    Honor the "colorspace:auto-grayscale" property: when explicitly false,
    never auto-demote the image to grayscale.
  */
  value=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(value) != MagickFalse)
    return(MagickFalse);
  /*
    Scan the pixels; UndefinedType means the image is not uniformly gray.
  */
  type=IdentifyImageGray(image,exception);
  if (type == UndefinedType)
    return(MagickFalse);
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=type;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   I m a g e   M o n o c h r o m e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageMonochrome() returns MagickTrue if all the pixels in the image have
%  the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange and changes the type of the image to bi-level.
%
%  The format of the SetImageMonochrome method is:
%
%      MagickBooleanType SetImageMonochrome(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  const char
    *auto_grayscale;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Already bi-level: nothing to do.
  */
  if (image->type == BilevelType)
    return(MagickTrue);
  /*
    Only sRGB-compatible colorspaces are candidates for bi-level demotion.
  */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /*
    An explicit "colorspace:auto-grayscale"=false property vetoes automatic
    type changes.
  */
  auto_grayscale=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(auto_grayscale) != MagickFalse)
    return(MagickFalse);
  /*
    Inspect the pixels: bail out unless every sample is strictly monochrome.
  */
  if (IdentifyImageMonochrome(image,exception) == MagickFalse)
    return(MagickFalse);
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=BilevelType;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f o r m I m a g e C o l o r s p a c e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImageColorspace() transforms an image colorspace, changing the
%  image data to reflect the new colorspace.
%
%  The format of the TransformImageColorspace method is:
%
%      MagickBooleanType TransformImageColorspace(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Same colorspace: tag-only update, no pixel conversion.
  */
  if (image->colorspace == colorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    Non-linear gray (gamma != 1.0) to sRGB: tag only — NOTE(review): this
    presumably relies on the gray samples already carrying an sRGB-compatible
    transfer curve; confirm against IdentifyImageGray()/SetImageGray().
  */
  if ((image->colorspace == GRAYColorspace) && (image->gamma != 1.0) &&
      (colorspace == sRGBColorspace))
    return(SetImageColorspace(image,colorspace,exception));
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    Convert the reference image from an alternate colorspace to sRGB.
  */
  /* Pixels are about to be rewritten, so any attached ICC/ICM profile would
     no longer describe them — drop both before converting. */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformsRGBImage(image,exception));
  status=MagickTrue;
  /* Route through sRGB first when the source is not already sRGB. */
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    status=TransformsRGBImage(image,exception);
  if (status == MagickFalse)
    return(status);
  /*
    Convert the reference image from sRGB to an alternate colorspace.
  */
  if (sRGBTransformImage(image,colorspace,exception) == MagickFalse)
    status=MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a n s f o r m s R G B I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformsRGBImage() converts the reference image from an alternate
%  colorspace to sRGB.  The transformation matrices are not the standard ones:
%  the weights are rescaled to normalize the range of the transformed values
%  to be [0..QuantumRange].
%
%  The format of the TransformsRGBImage method is:
%
%      MagickBooleanType TransformsRGBImage(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o exception: return any errors or warnings in this structure. % */ static inline void ConvertCMYToRGB(const double cyan,const double magenta, const double yellow,double *red,double *green,double *blue) { *red=QuantumRange*(1.0-cyan); *green=QuantumRange*(1.0-magenta); *blue=QuantumRange*(1.0-yellow); } static inline void ConvertLMSToXYZ(const double L,const double M,const double S, double *X,double *Y,double *Z) { *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S; *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S; *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S; } static inline void ConvertLMSToRGB(const double L,const double M, const double S,double *red,double *green,double *blue) { double X, Y, Z; ConvertLMSToXYZ(L,M,S,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertLuvToRGB(const double L,const double u, const double v,double *red,double *green,double *blue) { double X, Y, Z; ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline ssize_t RoundToYCC(const double value) { if (value <= 0.0) return(0); if (value >= 1388.0) return(1388); return((ssize_t) (value+0.5)); } static inline void ConvertLabToRGB(const double L,const double a, const double b,double *red,double *green,double *blue) { double X, Y, Z; ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertxyYToRGB(const double low_x,const double low_y, const double cap_Y,double *red,double *green,double *blue) { double X, Y, Z; X=cap_Y/low_y*low_x; Y=cap_Y; Z=cap_Y/low_y*(1.0-low_x-low_y); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr, double *red,double *green,double *blue) { *red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*(Pb-0.5)+ 1.4019995886561440468*(Pr-0.5)); 
*green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*(Pb-0.5)- 0.71413649331646789076*(Pr-0.5)); *blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*(Pb-0.5)+ 2.1453384174593273e-06*(Pr-0.5)); } static void ConvertYCbCrToRGB(const double Y,const double Cb, const double Cr,double *red,double *green,double *blue) { ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue); } static void ConvertYIQToRGB(const double Y,const double I,const double Q, double *red,double *green,double *blue) { *red=QuantumRange*(Y+0.9562957197589482261*(I-0.5)+0.6210244164652610754* (Q-0.5)); *green=QuantumRange*(Y-0.2721220993185104464*(I-0.5)-0.6473805968256950427* (Q-0.5)); *blue=QuantumRange*(Y-1.1069890167364901945*(I-0.5)+1.7046149983646481374* (Q-0.5)); } static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr, double *red,double *green,double *blue) { *red=QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)- 0.52591263066186533*(Dr-0.5)); *green=QuantumRange*(Y-0.12913289889050927*(Db-0.5)+ 0.26789932820759876*(Dr-0.5)); *blue=QuantumRange*(Y+0.66467905997895482*(Db-0.5)- 7.9202543533108e-05*(Dr-0.5)); } static void ConvertYUVToRGB(const double Y,const double U,const double V, double *red,double *green,double *blue) { *red=QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+1.1398279671717170825* (V-0.5)); *green=QuantumRange*(Y-0.3946101641414141437*(U-0.5)-0.5805003156565656797* (V-0.5)); *blue=QuantumRange*(Y+2.0319996843434342537*(U-0.5)-4.813762626262513e-04* (V-0.5)); } static MagickBooleanType TransformsRGBImage(Image *image, ExceptionInfo *exception) { #define TransformsRGBImageTag "Transform/Image" static const float YCCMap[1389] = { 0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 
0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f, 
0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 
0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f, 0.410663f, 0.411383f, 
0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f, 0.541787f, 
0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 
0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 
0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f, 
0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000f }; CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; progress=0; switch (image->colorspace) { case CMYKColorspace: { PixelInfo zero; /* Transform image from CMYK to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertCMYKToRGB(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: { /* Transform linear GRAY to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=(MagickRealType) GetPixelGray(image,q); if ((image->intensity == Rec601LuminancePixelIntensityMethod) || (image->intensity == Rec709LuminancePixelIntensityMethod)) gray=EncodePixelGamma(gray); SetPixelRed(image,ClampToQuantum(gray),q); SetPixelGreen(image,ClampToQuantum(gray),q); SetPixelBlue(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from source colorspace to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; X=QuantumScale*GetPixelRed(image,q); Y=QuantumScale*GetPixelGreen(image,q); Z=QuantumScale*GetPixelBlue(image,q); switch (image->colorspace) { case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case LabColorspace: { ConvertLabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,&red,&green,&blue); break; } case xyYColorspace: { ConvertxyYToRGB(X,Y,Z,&red,&green,&blue); break; } case 
XYZColorspace: { ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: { red=QuantumRange*X; green=QuantumRange*Y; blue=QuantumRange*Z; break; } } SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/ film_gamma)-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum 
*) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))]; green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))]; blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))]; SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType) red)),q); SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType) green)),q); SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType) blue)),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); 
SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (image->colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) 0.0000000; z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) 0.0000000; } break; } default: { /* Linear conversion tables. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(image,q)); green=ScaleQuantumToMap(GetPixelGreen(image,q)); blue=ScaleQuantumToMap(GetPixelBlue(image,q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) 
ScaleMapToQuantum(pixel.blue); } SetPixelRed(image,ClampToQuantum(pixel.red),q); SetPixelGreen(image,ClampToQuantum(pixel.green),q); SetPixelBlue(image,ClampToQuantum(pixel.blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransformsRGBImage) #endif proceed=SetImageProgress(image,TransformsRGBImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* Convert PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; register size_t blue, green, red; red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=(double) ClampToQuantum(pixel.red); image->colormap[i].green=(double) ClampToQuantum(pixel.green); image->colormap[i].blue=(double) ClampToQuantum(pixel.blue); } (void) 
SyncImage(image,exception); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(MagickTrue); }
mttkrp_omp.c
/* This file is part of ParTI!. ParTI! is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. ParTI! is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with ParTI!. If not, see <http://www.gnu.org/licenses/>. */ #include <ParTI.h> #include "hicoo.h" #define CHUNKSIZE 1 /************************************************* * PRIVATE FUNCTIONS *************************************************/ int spt_OmpMTTKRPHiCOOKernels( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int spt_OmpMTTKRPHiCOOKernels_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Balanced( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. 
sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Balanced( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Balanced( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Balanced( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); /************************************************* * PUBLIC FUNCTIONS *************************************************/ /** * OpenMP parallel Matriced sparse tensor in HiCOO format times a sequence of dense matrix Khatri-Rao products (MTTKRP) on a specified mode. 
OpenMP atomic is used. * @param[out] mats[nmodes] the result of MTTKRP, a dense matrix, with size * ndims[mode] * R * @param[in] hitsr the HiCOO sparse tensor input * @param[in] mats (N+1) dense matrices, with mats[nmodes] as temporary * @param[in] mats_order the order of the Khatri-Rao products * @param[in] mode the mode on which the MTTKRP is performed * @param[in] nt the number of threads * * This function uses support arbitrary-order sparse tensors with Khatri-Rao * products of dense factor matrices, the output is the updated dense matrix for the "mode". */ int sptOmpMTTKRPHiCOO( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int nt) { sptAssert(spt_OmpMTTKRPHiCOOKernels(hitsr, mats, mats_order, mode, nt) == 0); return 0; } /** * OpenMP parallel Matriced sparse tensor in HiCOO format times a sequence of dense matrix Khatri-Rao products (MTTKRP) on a specified mode. OpenMP atomic is used. The tensor rank and columns of dense matrices are stored in less bits, in sptElementIndex type. * @param[out] mats[nmodes] the result of MTTKRP, a dense matrix, with size * ndims[mode] * R * @param[in] hitsr the HiCOO sparse tensor input * @param[in] mats (N+1) dense matrices, with mats[nmodes] as temporary * @param[in] mats_order the order of the Khatri-Rao products * @param[in] mode the mode on which the MTTKRP is performed * @param[in] nt the number of threads * * This function uses support arbitrary-order sparse tensors with Khatri-Rao * products of dense factor matrices, the output is the updated dense matrix for the "mode". */ int sptOmpMTTKRPHiCOO_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. 
sptIndex const mode, const int nt) { sptAssert(spt_OmpMTTKRPHiCOOKernels_MatrixTiling(hitsr, mats, mats_order, mode, nt) == 0); return 0; } /** * OpenMP parallel Matriced sparse tensor in HiCOO format times a sequence of dense matrix Khatri-Rao products (MTTKRP) on a specified mode. The tensor rank and columns of dense matrices are stored in less bits, in sptElementIndex type. We independently parallelize it by rows of the superblock scheduler. * @param[out] mats[nmodes] the result of MTTKRP, a dense matrix, with size * ndims[mode] * R * @param[in] hitsr the HiCOO sparse tensor input * @param[in] mats (N+1) dense matrices, with mats[nmodes] as temporary * @param[in] mats_order the order of the Khatri-Rao products * @param[in] mode the mode on which the MTTKRP is performed * @param[in] nt the number of threads * * This function uses support arbitrary-order sparse tensors with Khatri-Rao * products of dense factor matrices, the output is the updated dense matrix for the "mode". */ int sptOmpMTTKRPHiCOO_MatrixTiling_Scheduled( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk, int balanced) { if(tk > 1) { if (balanced == 0) sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled(hitsr, mats, mats_order, mode, tk) == 0); else if (balanced == 1) sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Balanced(hitsr, mats, mats_order, mode, tk) == 0); } return 0; } /** * OpenMP parallel Matriced sparse tensor in HiCOO format times a sequence of dense matrix Khatri-Rao products (MTTKRP) on a specified mode. The tensor rank and columns of dense matrices are stored in less bits, in sptElementIndex type. We parallelize it by columns of the superblock scheduler, then use a parallel reduction. This is a privatization method. 
* @param[out] mats[nmodes] the result of MTTKRP, a dense matrix, with size * ndims[mode] * R * @param[in] hitsr the HiCOO sparse tensor input * @param[in] mats (N+1) dense matrices, with mats[nmodes] as temporary * @param[in] mats_order the order of the Khatri-Rao products * @param[in] mode the mode on which the MTTKRP is performed * @param[in] nt the number of threads * * This function uses support arbitrary-order sparse tensors with Khatri-Rao * products of dense factor matrices, the output is the updated dense matrix for the "mode". */ int sptOmpMTTKRPHiCOO_MatrixTiling_Scheduled_Reduce( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk, int balanced) { if(tk > 1) { if(balanced == 0) sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce(hitsr, mats, copy_mats, mats_order, mode, tk) == 0); else if (balanced == 1) sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Balanced(hitsr, mats, copy_mats, mats_order, mode, tk) == 0); } return 0; } /************************************************* * PRIVATE FUNCTIONS *************************************************/ int spt_OmpMTTKRPHiCOOKernels_3D( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptIndex const stride = mats[0]->stride; /* Check the mats. 
*/ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptIndex times_mat_index_1 = mats_order[1]; sptMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptMatrix * restrict times_mat_2 = mats[times_mat_index_2]; /* Loop kernels */ #pragma omp parallel for num_threads(tk) for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { sptBlockIndex block_coord_mode = hitsr->binds[mode].data[b]; sptBlockIndex block_coord_1 = hitsr->binds[times_mat_index_1].data[b]; sptBlockIndex block_coord_2 = hitsr->binds[times_mat_index_2].data[b]; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptIndex mode_i = (block_coord_mode << hitsr->sb_bits) + hitsr->einds[mode].data[z]; sptIndex tmp_i_1 = (block_coord_1 << hitsr->sb_bits) + hitsr->einds[times_mat_index_1].data[z]; sptIndex tmp_i_2 = (block_coord_2 << hitsr->sb_bits) + hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; for(sptIndex r=0; r<R; ++r) { #pragma omp atomic update mvals[mode_i * stride + r] += entry * times_mat_1->values[tmp_i_1 * stride + r] * times_mat_2->values[tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels return 0; } int 
spt_OmpMTTKRPHiCOOKernels( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(spt_OmpMTTKRPHiCOOKernels_3D(hitsr, mats, mats_order, mode, tk) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const vals = hitsr->values.data; sptIndex const stride = mats[0]->stride; /* Check the mats. */ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptMatrix * const M = mats[nmodes]; sptValue * const mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); // omp_lock_t lock; // omp_init_lock(&lock); /* Loop kernels */ #pragma omp parallel for num_threads(tk) for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { /* Allocate thread-private data */ sptIndex * block_coord = (sptIndex*)malloc(nmodes * sizeof(*block_coord)); sptIndex * ele_coord = (sptIndex*)malloc(nmodes * sizeof(*ele_coord)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Block indices */ for(sptIndex m=0; m<nmodes; ++m) block_coord[m] = hitsr->binds[m].data[b]; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptNnzIndex z=bptr_begin; z<bptr_end; ++z) { /* Element indices */ for(sptIndex m=0; m<nmodes; ++m) ele_coord[m] = (block_coord[m] << 
hitsr->sb_bits) + hitsr->einds[m].data[z]; /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptMatrix * times_mat = mats[times_mat_index]; sptIndex tmp_i = ele_coord[times_mat_index]; sptValue const entry = vals[z]; for(sptIndex r=0; r<R; ++r) { scratch.data[r] = entry * times_mat->values[tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; times_mat = mats[times_mat_index]; tmp_i = ele_coord[times_mat_index]; for(sptIndex r=0; r<R; ++r) { scratch.data[r] *= times_mat->values[tmp_i * stride + r]; } } sptIndex const mode_i = ele_coord[mode]; // omp_set_lock(&lock); for(sptIndex r=0; r<R; ++r) { #pragma omp atomic update mvals[mode_i * stride + r] += scratch.data[r]; } // omp_unset_lock(&lock); } // End loop entries } // End loop blocks /* Free thread-private space */ free(block_coord); free(ele_coord); sptFreeValueVector(&scratch); } // End loop kernels // omp_destroy_lock(&lock); return 0; } int spt_OmpMTTKRPHiCOOKernels_3D_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptIndex times_mat_index_1 = mats_order[1]; sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2]; /* Loop kernels */ #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride; for(sptElementIndex r=0; r<R; ++r) { #pragma omp atomic update bmvals_row[r] += entry * 
blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels return 0; } int spt_OmpMTTKRPHiCOOKernels_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(spt_OmpMTTKRPHiCOOKernels_3D_MatrixTiling(hitsr, mats, mats_order, mode, tk) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. */ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); /* Loop kernels */ #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { /* Allocate thread-private data */ sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Blocked matrices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; 
            sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block.
             * NOTE(review): `z` is sptIndex while bptr_begin/bptr_end are
             * sptNnzIndex — this narrows for tensors whose nonzero count
             * exceeds the sptIndex range; confirm against the sptNnzIndex
             * loops used by spt_OmpMTTKRPHiCOOKernels. */
            for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Multiply the 1st matrix */
                sptIndex times_mat_index = mats_order[1];
                sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                sptValue const entry = vals[z];
                #pragma omp simd
                for(sptElementIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(sptIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    tmp_i = hitsr->einds[times_mat_index].data[z];
                    #pragma omp simd
                    for(sptElementIndex r=0; r<R; ++r) {
                        scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                    }
                }

                sptElementIndex const mode_i = hitsr->einds[mode].data[z];
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                /* Scatter the accumulated row; atomics protect rows shared
                 * between concurrently processed kernels. */
                for(sptElementIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    bmvals_row[r] += scratch.data[r];
                }
            } // End loop entries
        } // End loop blocks

        /* Free thread-private space */
        free(blocked_times_mat);
        sptFreeValueVector(&scratch);
    } // End loop kernels

    return 0;
}

/* 3-D scheduled kernel: rows of the superblock scheduler are processed in
 * parallel, so the inner accumulation needs no atomics. */
int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[], // mats[nmodes] as temporary space.
    sptIndex const mats_order[], // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes ==3);
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    sptIndex times_mat_index_1 = mats_order[1];
    sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    /* Superblock size sk = 2^sk_bits; num_kernel_dim = number of scheduler rows. */
    sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits);
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_mode = hitsr->kschr[mode];
    // printf("nkiters: %u, num_kernel_dim: %u\n", hitsr->nkiters[mode], num_kernel_dim);

#ifdef NNZ_STATISTICS
    /* Per-thread nonzero counters for load-balance reporting. */
    sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex));
#endif

    /* Loop parallel iterations */
    for(sptIndex i=0; i<hitsr->nkiters[mode]; ++i) {
        /* Loop kernels */
#ifdef NNZ_STATISTICS
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
        for(sptIndex k=0; k<num_kernel_dim; ++k) {
            int tid = omp_get_thread_num();
            /* This scheduler row has fewer than i kernels: nothing to do. */
            if(i >= kschr_mode[k].len) {
                continue;
            }

            sptIndex kptr_loc = kschr_mode[k].data[i];
            sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc];
            sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1];

            /* Loop blocks in a kernel */
            for(sptIndex b=kptr_begin; b<kptr_end; ++b) {
                sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) *
stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels } // End loop iterations #ifdef NNZ_STATISTICS /* Calculate load balance of kernels */ sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; // printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); sptAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as 
temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled(hitsr, mats, mats_order, mode, tk) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. */ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits); sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; sptIndexVector * restrict kschr_mode = hitsr->kschr[mode]; #ifdef NNZ_STATISTICS sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex)); memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex)); #endif /* Loop parallel iterations */ for(sptIndex i=0; i<hitsr->nkiters[mode]; ++i) { /* Loop kernels */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex k=0; k<num_kernel_dim; ++k) { int tid = omp_get_thread_num(); if(i >= kschr_mode[k].len) continue; sptIndex kptr_loc = kschr_mode[k].data[i]; sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc]; sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1]; /* Allocate thread-private data */ sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * 
sizeof(*blocked_times_mat)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Loop blocks in a kernel */ for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Blocked matrices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; sptValue const entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } } sptElementIndex const mode_i = hitsr->einds[mode].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r]; } } // End loop entries } // End loop blocks /* Free thread-private space */ free(blocked_times_mat); sptFreeValueVector(&scratch); } // End loop kernels } // End loop iterations #ifdef NNZ_STATISTICS /* Calculate load balance of kernels */ sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; // printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; 
if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); sptAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Balanced( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. */ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptIndex times_mat_index_1 = mats_order[1]; sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2]; sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits); sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; sptIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode]; sptIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode]; 
sptIndex npars = hitsr->nkpars[mode]; // printf("nkiters: %u, num_kernel_dim: %u\n", hitsr->nkiters[mode], num_kernel_dim); #ifdef NNZ_STATISTICS sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex)); memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex)); #endif /* Loop partitions */ for(sptIndex p=0; p<npars; ++p) { /* Loop kernels */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex i=0; i<num_kernel_dim; ++i) { if(p >= kschr_balanced_pos_mode[i].len - 1) continue; int tid = omp_get_thread_num(); sptIndex j_begin = kschr_balanced_pos_mode[i].data[p]; sptIndex j_end = kschr_balanced_pos_mode[i].data[p+1]; /* Loop inside a partition */ for(sptIndex j = j_begin; j < j_end; ++j) { sptIndex kernel_num = kschr_balanced_mode[i].data[j]; // printf("tid: %d, (i, j): (%u, %u), kernel_num: %u\n", tid, i, j, kernel_num); sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; #pragma omp simd 
for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop inside a partition } // End loop kernels } // End loop partitions /* Process using atomics */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex k = 0; k < hitsr->kschr_rest[mode].len; ++k) { int tid = omp_get_thread_num(); sptIndex kernel_num = hitsr->kschr_rest[mode].data[k]; sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride; for(sptElementIndex r=0; r<R; ++r) { #pragma omp atomic update bmvals_row[r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; 
} } // End loop entries } // End loop blocks } // End loop kernels #ifdef NNZ_STATISTICS /* Calculate load balance of kernels */ sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; // printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); sptAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Balanced( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Balanced(hitsr, mats, mats_order, mode, tk) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits); sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; sptIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode]; sptIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode]; sptIndex npars = hitsr->nkpars[mode]; #ifdef NNZ_STATISTICS sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex)); memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex)); #endif /* Loop partitions */ for(sptIndex p=0; p<npars; ++p) { /* Loop kernels */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex i=0; i<num_kernel_dim; ++i) { if(p >= kschr_balanced_pos_mode[i].len - 1) continue; int tid = omp_get_thread_num(); sptIndex j_begin = kschr_balanced_pos_mode[i].data[p]; sptIndex j_end = kschr_balanced_pos_mode[i].data[p+1]; /* Loop inside a partition */ for(sptIndex j = j_begin; j < j_end; ++j) { sptIndex kernel_num = kschr_balanced_mode[i].data[j]; sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Allocate thread-private data */ sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Loop blocks in a kernel */ for(sptNnzIndex 
b=kptr_begin; b<kptr_end; ++b) { /* Blocked matrices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; sptValue const entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } } sptElementIndex const mode_i = hitsr->einds[mode].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r]; } } // End loop entries } // End loop blocks /* Free thread-private space */ free(blocked_times_mat); sptFreeValueVector(&scratch); } // End loop inside a partition } // End loop kernels } // End loop partitions /* Process using atomics */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex k = 0; k < hitsr->kschr_rest[mode].len; ++k) { int tid = omp_get_thread_num(); sptIndex kernel_num = hitsr->kschr_rest[mode].data[k]; sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; sptNnzIndex kptr_end = 
hitsr->kptr.data[kernel_num+1]; /* Allocate thread-private data */ sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Loop blocks in a kernel */ for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Blocked matrices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; sptValue const entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } } sptElementIndex const mode_i = hitsr->einds[mode].data[z]; sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride; for(sptElementIndex r=0; r<R; ++r) { #pragma omp atomic update bmvals_row[r] += scratch.data[r]; } } // End loop entries } // End loop blocks /* Free thread-private space */ free(blocked_times_mat); sptFreeValueVector(&scratch); } // End loop kernels #ifdef NNZ_STATISTICS /* Calculate load balance of kernels */ sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; 
// printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); sptAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } sptIndex times_mat_index_1 = mats_order[1]; sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2]; sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits); sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; sptIndexVector * restrict kschr_mode = hitsr->kschr[mode]; #ifdef NNZ_STATISTICS sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex)); memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex)); #endif /* Loop parallel iterations */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex i=0; i<hitsr->nkiters[mode]; ++i) { int tid = omp_get_thread_num(); /* Loop kernels */ for(sptIndex k=0; k<num_kernel_dim; ++k) { if(i >= kschr_mode[k].len) { // printf("i: %u, k: %u\n", i, k); continue; } sptIndex kptr_loc = kschr_mode[k].data[i]; sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc]; sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* use copy_mats to store each thread's output */ sptValue * blocked_mvals = copy_mats[tid]->values + 
(hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels } // End loop iterations /* Reduction */ #pragma omp parallel for schedule(static) num_threads(tk) for(sptIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk; ++t) { #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } #ifdef NNZ_STATISTICS /* Calculate load balance of kernels */ sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; // printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: 
%"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); sptAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce(hitsr, mats, copy_mats, mats_order, mode, tk) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. */ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits); sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; sptIndexVector * restrict kschr_mode = hitsr->kschr[mode]; #ifdef NNZ_STATISTICS sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex)); memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex)); #endif /* Loop parallel iterations */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else 
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex i=0; i<hitsr->nkiters[mode]; ++i) { int tid = omp_get_thread_num(); /* Loop kernels */ for(sptIndex k=0; k<num_kernel_dim; ++k) { if(i >= kschr_mode[k].len) continue; sptIndex kptr_loc = kschr_mode[k].data[i]; sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc]; sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1]; /* Allocate thread-private data */ sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Loop blocks in a kernel */ for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Blocked matrices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; sptValue const entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } } sptElementIndex const mode_i = hitsr->einds[mode].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += 
scratch.data[r]; } } // End loop entries } // End loop blocks /* Free thread-private space */ free(blocked_times_mat); sptFreeValueVector(&scratch); } // End loop kernels } // End loop iterations /* Reduction */ #pragma omp parallel for schedule(static) num_threads(tk) for(sptIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk; ++t) { #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } #ifdef NNZ_STATISTICS /* Calculate load balance of kernels */ sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; for(int i = 0; i < tk; ++i) { sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); sptAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Balanced( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } sptIndex times_mat_index_1 = mats_order[1]; sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2]; sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits); sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; sptIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode]; sptIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode]; sptIndex npars = hitsr->nkpars[mode]; #ifdef NNZ_STATISTICS sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex)); memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex)); #endif /* Loop parallel iterations */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex p=0; p<npars; ++p) { int tid = omp_get_thread_num(); /* Loop kernels */ for(sptIndex i=0; i<num_kernel_dim; ++i) { if(p >= kschr_balanced_pos_mode[i].len - 1) continue; sptIndex j_begin = kschr_balanced_pos_mode[i].data[p]; sptIndex j_end = kschr_balanced_pos_mode[i].data[p+1]; for(sptIndex j=j_begin; j<j_end; ++j) { sptIndex kernel_num = kschr_balanced_mode[i].data[j]; sptNnzIndex kptr_begin = 
hitsr->kptr.data[kernel_num]; sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* use copy_mats to store each thread's output */ sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End kernels in a partition } // End loop kernels } // End loop partitions /* Process using atomics */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex k = 0; k < hitsr->kschr_rest[mode].len; ++k) { int tid = omp_get_thread_num(); sptIndex kernel_num = hitsr->kschr_rest[mode].data[k]; sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* Use copy_mats to reduce atomics */ 
sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride; for(sptElementIndex r=0; r<R; ++r) { #pragma omp atomic update bmvals_row[r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels /* Reduction */ #pragma omp parallel for schedule(static) num_threads(tk) for(sptIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk; ++t) { #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } /* Calculate load balance of kernels */ #ifdef NNZ_STATISTICS sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; // printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / 
tk); printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); sptAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Balanced( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Balanced(hitsr, mats, copy_mats, mats_order, mode, tk) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. */ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits); sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; sptIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode]; sptIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode]; sptIndex npars = hitsr->nkpars[mode]; #ifdef NNZ_STATISTICS sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * 
sizeof(sptNnzIndex)); memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex)); #endif /* Loop parallel iterations */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex p=0; p<npars; ++p) { int tid = omp_get_thread_num(); /* Loop kernels */ for(sptIndex i=0; i<num_kernel_dim; ++i) { if(p >= kschr_balanced_pos_mode[i].len - 1) continue; sptIndex j_begin = kschr_balanced_pos_mode[i].data[p]; sptIndex j_end = kschr_balanced_pos_mode[i].data[p+1]; for(sptIndex j=j_begin; j<j_end; ++j) { sptIndex kernel_num = kschr_balanced_mode[i].data[j]; sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Allocate thread-private data */ sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Loop blocks in a kernel */ for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Blocked matrices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; sptValue const entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = 
mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } } sptElementIndex const mode_i = hitsr->einds[mode].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r]; } } // End loop entries } // End loop blocks /* Free thread-private space */ free(blocked_times_mat); sptFreeValueVector(&scratch); } // End kernels in a partition } // End loop kernels } // End loop iterations /* Process using atomics */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex k = 0; k < hitsr->kschr_rest[mode].len; ++k) { int tid = omp_get_thread_num(); sptIndex kernel_num = hitsr->kschr_rest[mode].data[k]; sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Allocate thread-private data */ sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Loop blocks in a kernel */ for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Blocked matrices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; /* Use copy_mats to reduce atomics */ sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; 
sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; sptValue const entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } } sptElementIndex const mode_i = hitsr->einds[mode].data[z]; sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride; for(sptElementIndex r=0; r<R; ++r) { #pragma omp atomic update bmvals_row[r] += scratch.data[r]; } } // End loop entries } // End loop blocks /* Free thread-private space */ free(blocked_times_mat); sptFreeValueVector(&scratch); } // End loop kernels /* Reduction */ #pragma omp parallel for schedule(static) num_threads(tk) for(sptIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk; ++t) { #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } #ifdef NNZ_STATISTICS /* Calculate load balance of kernels */ sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; // printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); sptAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; }
_vet.c
/* Generated by Cython 0.29.6 */ /* BEGIN: Cython Metadata { "distutils": { "depends": [ "/home/aperez/.conda/envs/pysteps/lib/python3.6/site-packages/numpy/core/include/numpy/arrayobject.h", "/home/aperez/.conda/envs/pysteps/lib/python3.6/site-packages/numpy/core/include/numpy/ufuncobject.h" ], "extra_compile_args": [ "-fopenmp" ], "extra_link_args": [ "-fopenmp" ], "include_dirs": [ "/home/aperez/.conda/envs/pysteps/lib/python3.6/site-packages/numpy/core/include" ], "language": "c", "name": "pysteps.motion._vet", "sources": [ "pysteps/motion/_vet.pyx" ] }, "module_name": "pysteps.motion._vet" } END: Cython Metadata */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_6" #define CYTHON_HEX_VERSION 0x001D06F0 #define CYTHON_FUTURE_DIVISION 1 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define 
CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef 
CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 
(PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef 
CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef 
Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif 
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #define PyObject_Unicode PyObject_Str #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef 
PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__pysteps__motion___vet #define __PYX_HAVE_API__pysteps__motion___vet /* Early includes */ #include <string.h> #include <stdio.h> #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include <math.h> #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if 
defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) 
__Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; 
PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; /* Header.proto */ #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "pysteps/motion/_vet.pyx", "__init__.pxd", "type.pxd", }; /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 
0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* NoFastGil.proto */ #define __Pyx_PyGILState_Ensure PyGILState_Ensure #define __Pyx_PyGILState_Release PyGILState_Release #define __Pyx_FastGIL_Remember() #define __Pyx_FastGIL_Forget() #define __Pyx_FastGilFuncInit() /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":776 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":777 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":778 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":779 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":783 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":784 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":785 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* 
"../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":786 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":790 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":801 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":802 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 * 
ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":805 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":806 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":808 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":811 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":812 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":813 * ctypedef npy_double float_t * ctypedef npy_double double_t * 
ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "pysteps/motion/_vet.pyx":13 * cimport numpy as np * * ctypedef np.float64_t float64 # <<<<<<<<<<<<<< * ctypedef np.int8_t int8 * ctypedef np.intp_t intp */ typedef __pyx_t_5numpy_float64_t __pyx_t_7pysteps_6motion_4_vet_float64; /* "pysteps/motion/_vet.pyx":14 * * ctypedef np.float64_t float64 * ctypedef np.int8_t int8 # <<<<<<<<<<<<<< * ctypedef np.intp_t intp * */ typedef __pyx_t_5numpy_int8_t __pyx_t_7pysteps_6motion_4_vet_int8; /* "pysteps/motion/_vet.pyx":15 * ctypedef np.float64_t float64 * ctypedef np.int8_t int8 * ctypedef np.intp_t intp # <<<<<<<<<<<<<< * * from libc.math cimport floor, round */ typedef __pyx_t_5numpy_intp_t __pyx_t_7pysteps_6motion_4_vet_intp; /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); /*--- Type declarations ---*/ /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":815 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":816 * * ctypedef npy_cfloat cfloat_t * 
ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":817 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":819 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) 
__Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void 
__Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* BufferGetAndValidate.proto */ #define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\ ((obj == Py_None || obj == NULL) ?\ (__Pyx_ZeroBuffer(buf), 0) :\ __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)) static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static void __Pyx_ZeroBuffer(Py_buffer* buf); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ 
static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); #define __Pyx_BufPtrStrided3d(type, buf, i0, s0, i1, s1, i2, s2) (type)((char*)buf + i0 * s0 + i1 * s1 + i2 * s2) #define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) 
__Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* BufferFallbackError.proto */ static void __Pyx_RaiseBufferFallbackError(void); /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int 
nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); #define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* IterFinish.proto */ static CYTHON_INLINE int __Pyx_IterFinish(void); /* UnpackItemEndCheck.proto */ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define 
__Pyx_PyInt_SubtractObjC(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceSubtract(op1, op2) : PyNumber_Subtract(op1, op2)) #endif /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* DictGetItem.proto */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key); #define __Pyx_PyObject_Dict_GetItem(obj, name)\ 
(likely(PyDict_CheckExact(obj)) ?\ __Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name)) #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name) #endif /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* TypeImport.proto */ #ifndef __PYX_HAVE_RT_ImportType_proto #define __PYX_HAVE_RT_ImportType_proto enum __Pyx_ImportType_CheckSize { 
__Pyx_ImportType_CheckSize_Error = 0, __Pyx_ImportType_CheckSize_Warn = 1, __Pyx_ImportType_CheckSize_Ignore = 2 }; static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* 
RealImag.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(__cplusplus) && CYTHON_CCOMPLEX\ && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_float(a, b) ((a)==(b)) #define __Pyx_c_sum_float(a, b) ((a)+(b)) #define __Pyx_c_diff_float(a, b) ((a)-(b)) #define __Pyx_c_prod_float(a, b) ((a)*(b)) #define __Pyx_c_quot_float(a, b) ((a)/(b)) #define __Pyx_c_neg_float(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero_float(z) ((z)==(float)0) #define __Pyx_c_conj_float(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_float(z) (::std::abs(z)) #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_float(z) ((z)==0) #define __Pyx_c_conj_float(z) (conjf(z)) #if 1 #define __Pyx_c_abs_float(z) (cabsf(z)) #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex 
__Pyx_c_neg_float(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_double(a, b) ((a)==(b)) #define __Pyx_c_sum_double(a, b) ((a)+(b)) #define __Pyx_c_diff_double(a, b) ((a)-(b)) #define __Pyx_c_prod_double(a, b) ((a)*(b)) #define __Pyx_c_quot_double(a, b) ((a)/(b)) #define __Pyx_c_neg_double(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero_double(z) ((z)==(double)0) #define __Pyx_c_conj_double(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_double(z) (::std::abs(z)) #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_double(z) ((z)==0) #define __Pyx_c_conj_double(z) (conj(z)) #if 1 #define __Pyx_c_abs_double(z) (cabs(z)) #define __Pyx_c_pow_double(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); 
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); /* CIntFromPy.proto */ static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_As_Py_intptr_t(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /* Module declarations from 'cython' */ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module 
declarations from 'cpython.ref' */ /* Module declarations from 'cpython.mem' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'libc.math' */ /* Module declarations from 'pysteps.motion._vet' */ static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_f_7pysteps_6motion_4_vet_float_abs(__pyx_t_7pysteps_6motion_4_vet_float64); /*proto*/ static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_intp __pyx_f_7pysteps_6motion_4_vet_int_min(__pyx_t_7pysteps_6motion_4_vet_intp, __pyx_t_7pysteps_6motion_4_vet_intp); /*proto*/ static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_intp __pyx_f_7pysteps_6motion_4_vet_int_max(__pyx_t_7pysteps_6motion_4_vet_intp, __pyx_t_7pysteps_6motion_4_vet_intp); /*proto*/ static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_f_7pysteps_6motion_4_vet__linear_interpolation(__pyx_t_7pysteps_6motion_4_vet_float64, __pyx_t_7pysteps_6motion_4_vet_float64, __pyx_t_7pysteps_6motion_4_vet_float64, __pyx_t_7pysteps_6motion_4_vet_float64, __pyx_t_7pysteps_6motion_4_vet_float64); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64 = { "float64", NULL, sizeof(__pyx_t_7pysteps_6motion_4_vet_float64), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8 = { "int8", NULL, sizeof(__pyx_t_7pysteps_6motion_4_vet_int8), { 0 }, 0, IS_UNSIGNED(__pyx_t_7pysteps_6motion_4_vet_int8) ? 
'U' : 'I', IS_UNSIGNED(__pyx_t_7pysteps_6motion_4_vet_int8), 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp = { "intp", NULL, sizeof(__pyx_t_7pysteps_6motion_4_vet_intp), { 0 }, 0, IS_UNSIGNED(__pyx_t_7pysteps_6motion_4_vet_intp) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_7pysteps_6motion_4_vet_intp), 0 }; #define __Pyx_MODULE_NAME "pysteps.motion._vet" extern int __pyx_module_is_main_pysteps__motion___vet; int __pyx_module_is_main_pysteps__motion___vet = 0; /* Implementation of 'pysteps.motion._vet' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_zip; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_builtin_ImportError; static const char __pyx_k_i[] = "i"; static const char __pyx_k_j[] = "j"; static const char __pyx_k_l[] = "l"; static const char __pyx_k_m[] = "m"; static const char __pyx_k_x[] = "x"; static const char __pyx_k_y[] = "y"; static const char __pyx_k_dx[] = "dx"; static const char __pyx_k_dy[] = "dy"; static const char __pyx_k_l0[] = "l0"; static const char __pyx_k_l1[] = "l1"; static const char __pyx_k_ll[] = "ll"; static const char __pyx_k_m0[] = "m0"; static const char __pyx_k_m1[] = "m1"; static const char __pyx_k_mm[] = "mm"; static const char __pyx_k_np[] = "np"; static const char __pyx_k_nx[] = "nx"; static const char __pyx_k_ny[] = "ny"; static const char __pyx_k_xy[] = "xy"; static const char __pyx_k_f00[] = "f00"; static const char __pyx_k_f01[] = "f01"; static const char __pyx_k_f10[] = "f10"; static const char __pyx_k_f11[] = "f11"; static const char __pyx_k_l_i[] = "l_i"; static const char __pyx_k_m_j[] = "m_j"; static const char __pyx_k_sum[] = "sum"; static const char __pyx_k_zip[] = "zip"; static const char __pyx_k_axis[] = "axis"; static const char __pyx_k_full[] = "full"; static const char __pyx_k_int8[] = "int8"; static const char __pyx_k_intp[] = "intp"; static const char __pyx_k_main[] = "__main__"; static const char 
__pyx_k_mask[] = "mask"; static const char __pyx_k_mean[] = "mean"; static const char __pyx_k_name[] = "__name__"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_warp[] = "_warp"; static const char __pyx_k_dtype[] = "dtype"; static const char __pyx_k_i_max[] = "i_max"; static const char __pyx_k_i_min[] = "i_min"; static const char __pyx_k_i_sec[] = "i_sec"; static const char __pyx_k_image[] = "image"; static const char __pyx_k_j_max[] = "j_max"; static const char __pyx_k_j_min[] = "j_min"; static const char __pyx_k_j_sec[] = "j_sec"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_zeros[] = "zeros"; static const char __pyx_k_arange[] = "arange"; static const char __pyx_k_buffer[] = "buffer"; static const char __pyx_k_counts[] = "counts"; static const char __pyx_k_df_dx2[] = "df_dx2"; static const char __pyx_k_df_dy2[] = "df_dy2"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_unique[] = "unique"; static const char __pyx_k_x_ceil[] = "x_ceil"; static const char __pyx_k_y_ceil[] = "y_ceil"; static const char __pyx_k_df_dxdy[] = "df_dxdy"; static const char __pyx_k_float64[] = "float64"; static const char __pyx_k_i_shift[] = "i_shift"; static const char __pyx_k_j_shift[] = "j_shift"; static const char __pyx_k_reshape[] = "reshape"; static const char __pyx_k_x_float[] = "x_float"; static const char __pyx_k_x_floor[] = "x_floor"; static const char __pyx_k_x_guess[] = "x_guess"; static const char __pyx_k_y_float[] = "y_float"; static const char __pyx_k_y_floor[] = "y_floor"; static const char __pyx_k_y_guess[] = "y_guess"; static const char __pyx_k_gradient[] = "gradient"; static const char __pyx_k_new_image[] = "new_image"; static const char __pyx_k_residuals[] = "residuals"; static const char __pyx_k_x_max_int[] = "x_max_int"; static const char __pyx_k_x_sectors[] = "x_sectors"; static const char __pyx_k_y_max_int[] = "y_max_int"; static const 
char __pyx_k_y_sectors[] = "y_sectors"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_morph_mask[] = "morph_mask"; static const char __pyx_k_ImportError[] = "ImportError"; static const char __pyx_k_grad_smooth[] = "grad_smooth"; static const char __pyx_k_input_image[] = "input_image"; static const char __pyx_k_interp_coef[] = "interp_coef"; static const char __pyx_k_sector_area[] = "sector_area"; static const char __pyx_k_smooth_gain[] = "smooth_gain"; static const char __pyx_k_x_max_float[] = "x_max_float"; static const char __pyx_k_y_max_float[] = "y_max_float"; static const char __pyx_k_RuntimeError[] = "RuntimeError"; static const char __pyx_k_displacement[] = "displacement"; static const char __pyx_k_morphed_mask[] = "morphed_mask"; static const char __pyx_k_return_index[] = "return_index"; static const char __pyx_k_x_image_size[] = "x_image_size"; static const char __pyx_k_y_image_size[] = "y_image_size"; static const char __pyx_k_cost_function[] = "_cost_function"; static const char __pyx_k_gradient_data[] = "_gradient_data"; static const char __pyx_k_morphed_image[] = "morphed_image"; static const char __pyx_k_return_counts[] = "return_counts"; static const char __pyx_k_x_sector_size[] = "x_sector_size"; static const char __pyx_k_y_sector_size[] = "y_sector_size"; static const char __pyx_k_grad_residuals[] = "grad_residuals"; static const char __pyx_k_template_image[] = "template_image"; static const char __pyx_k_gradient_values[] = "gradient_values"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_smoothness_penalty[] = "smoothness_penalty"; static const char __pyx_k_pysteps_motion__vet[] = "pysteps.motion._vet"; static const char __pyx_k_sector_displacement[] = "sector_displacement"; static const char __pyx_k_pysteps_motion__vet_pyx[] = "pysteps/motion/_vet.pyx"; static const char __pyx_k_inloop_smoothness_penalty[] = "inloop_smoothness_penalty"; static const char 
__pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static const char __pyx_k_Error_computing_cost_function[] = "Error computing cost function.\n"; static const char __pyx_k_Cython_module_for_morphing_and[] = "\nCython module for morphing and cost functions implementations used in\nin the Variation Echo Tracking Algorithm\n"; static const char __pyx_k_The_number_of_sectors_in_x_axis[] = "The number of sectors in x axis (axis=0) don't divide the image size"; static const char __pyx_k_The_number_of_sectors_in_y_axis[] = "The number of sectors in y axis (axis=1) don't divide the image size"; static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static PyObject *__pyx_kp_u_Error_computing_cost_function; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_n_s_ImportError; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_kp_u_The_number_of_sectors_in_x_axis; static PyObject *__pyx_kp_u_The_number_of_sectors_in_y_axis; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_arange; static PyObject *__pyx_n_s_axis; static PyObject *__pyx_n_s_buffer; static PyObject *__pyx_n_s_cline_in_traceback; static 
PyObject *__pyx_n_s_cost_function; static PyObject *__pyx_n_s_counts; static PyObject *__pyx_n_s_df_dx2; static PyObject *__pyx_n_s_df_dxdy; static PyObject *__pyx_n_s_df_dy2; static PyObject *__pyx_n_s_displacement; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_dx; static PyObject *__pyx_n_s_dy; static PyObject *__pyx_n_s_f00; static PyObject *__pyx_n_s_f01; static PyObject *__pyx_n_s_f10; static PyObject *__pyx_n_s_f11; static PyObject *__pyx_n_s_float64; static PyObject *__pyx_n_u_float64; static PyObject *__pyx_n_s_full; static PyObject *__pyx_n_s_grad_residuals; static PyObject *__pyx_n_s_grad_smooth; static PyObject *__pyx_n_s_gradient; static PyObject *__pyx_n_s_gradient_data; static PyObject *__pyx_n_s_gradient_values; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_i_max; static PyObject *__pyx_n_s_i_min; static PyObject *__pyx_n_s_i_sec; static PyObject *__pyx_n_s_i_shift; static PyObject *__pyx_n_s_image; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_inloop_smoothness_penalty; static PyObject *__pyx_n_s_input_image; static PyObject *__pyx_n_s_int8; static PyObject *__pyx_n_s_interp_coef; static PyObject *__pyx_n_s_intp; static PyObject *__pyx_n_s_j; static PyObject *__pyx_n_s_j_max; static PyObject *__pyx_n_s_j_min; static PyObject *__pyx_n_s_j_sec; static PyObject *__pyx_n_s_j_shift; static PyObject *__pyx_n_s_l; static PyObject *__pyx_n_s_l0; static PyObject *__pyx_n_s_l1; static PyObject *__pyx_n_s_l_i; static PyObject *__pyx_n_s_ll; static PyObject *__pyx_n_s_m; static PyObject *__pyx_n_s_m0; static PyObject *__pyx_n_s_m1; static PyObject *__pyx_n_s_m_j; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_mask; static PyObject *__pyx_n_s_mean; static PyObject *__pyx_n_s_mm; static PyObject *__pyx_n_s_morph_mask; static PyObject *__pyx_n_s_morphed_image; static PyObject *__pyx_n_s_morphed_mask; static PyObject *__pyx_n_s_name; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject 
*__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_new_image; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_kp_u_numpy_core_multiarray_failed_to; static PyObject *__pyx_kp_u_numpy_core_umath_failed_to_impor; static PyObject *__pyx_n_s_nx; static PyObject *__pyx_n_s_ny; static PyObject *__pyx_n_s_pysteps_motion__vet; static PyObject *__pyx_kp_s_pysteps_motion__vet_pyx; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reshape; static PyObject *__pyx_n_s_residuals; static PyObject *__pyx_n_s_return_counts; static PyObject *__pyx_n_s_return_index; static PyObject *__pyx_n_s_sector_area; static PyObject *__pyx_n_s_sector_displacement; static PyObject *__pyx_n_s_smooth_gain; static PyObject *__pyx_n_s_smoothness_penalty; static PyObject *__pyx_n_s_sum; static PyObject *__pyx_n_s_template_image; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_unique; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_warp; static PyObject *__pyx_n_s_x; static PyObject *__pyx_n_s_x_ceil; static PyObject *__pyx_n_s_x_float; static PyObject *__pyx_n_s_x_floor; static PyObject *__pyx_n_s_x_guess; static PyObject *__pyx_n_s_x_image_size; static PyObject *__pyx_n_s_x_max_float; static PyObject *__pyx_n_s_x_max_int; static PyObject *__pyx_n_s_x_sector_size; static PyObject *__pyx_n_s_x_sectors; static PyObject *__pyx_n_s_xy; static PyObject *__pyx_n_s_y; static PyObject *__pyx_n_s_y_ceil; static PyObject *__pyx_n_s_y_float; static PyObject *__pyx_n_s_y_floor; static PyObject *__pyx_n_s_y_guess; static PyObject *__pyx_n_s_y_image_size; static PyObject *__pyx_n_s_y_max_float; static PyObject *__pyx_n_s_y_max_int; static PyObject *__pyx_n_s_y_sector_size; static PyObject *__pyx_n_s_y_sectors; static PyObject *__pyx_n_s_zeros; static PyObject *__pyx_n_s_zip; static PyObject *__pyx_pf_7pysteps_6motion_4_vet__warp(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_image, 
PyArrayObject *__pyx_v_mask, PyArrayObject *__pyx_v_displacement, int __pyx_v_gradient); /* proto */ static PyObject *__pyx_pf_7pysteps_6motion_4_vet_2_cost_function(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_sector_displacement, PyArrayObject *__pyx_v_template_image, PyArrayObject *__pyx_v_input_image, PyArrayObject *__pyx_v_mask, float __pyx_v_smooth_gain, int __pyx_v_gradient); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static PyObject *__pyx_float_1_0; static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_2; static PyObject *__pyx_int_4; static PyObject *__pyx_tuple_; static PyObject *__pyx_slice__3; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__15; static PyObject *__pyx_codeobj__14; static PyObject *__pyx_codeobj__16; /* Late includes */ /* "pysteps/motion/_vet.pyx":21 * cimport numpy as np * * cdef inline float64 float_abs(float64 a) nogil: return a if a > 0. else -a # <<<<<<<<<<<<<< * """ Return the absolute value of a float """ * */ static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_f_7pysteps_6motion_4_vet_float_abs(__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_a) { __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_r; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_t_1; if (((__pyx_v_a > 0.) 
!= 0)) { __pyx_t_1 = __pyx_v_a; } else { __pyx_t_1 = (-__pyx_v_a); } __pyx_r = __pyx_t_1; goto __pyx_L0; /* function exit code */ __pyx_L0:; return __pyx_r; } /* "pysteps/motion/_vet.pyx":24 * """ Return the absolute value of a float """ * * cdef inline intp int_min(intp a, intp b) nogil: return a if a < b else b # <<<<<<<<<<<<<< * * cdef inline intp int_max(intp a, intp b) nogil: return a if a > b else b */ static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_intp __pyx_f_7pysteps_6motion_4_vet_int_min(__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_a, __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_b) { __pyx_t_7pysteps_6motion_4_vet_intp __pyx_r; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_1; if (((__pyx_v_a < __pyx_v_b) != 0)) { __pyx_t_1 = __pyx_v_a; } else { __pyx_t_1 = __pyx_v_b; } __pyx_r = __pyx_t_1; goto __pyx_L0; /* function exit code */ __pyx_L0:; return __pyx_r; } /* "pysteps/motion/_vet.pyx":26 * cdef inline intp int_min(intp a, intp b) nogil: return a if a < b else b * * cdef inline intp int_max(intp a, intp b) nogil: return a if a > b else b # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_intp __pyx_f_7pysteps_6motion_4_vet_int_max(__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_a, __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_b) { __pyx_t_7pysteps_6motion_4_vet_intp __pyx_r; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_1; if (((__pyx_v_a > __pyx_v_b) != 0)) { __pyx_t_1 = __pyx_v_a; } else { __pyx_t_1 = __pyx_v_b; } __pyx_r = __pyx_t_1; goto __pyx_L0; /* function exit code */ __pyx_L0:; return __pyx_r; } /* "pysteps/motion/_vet.pyx":29 * * @cython.cdivision(True) * cdef inline float64 _linear_interpolation(float64 x, # <<<<<<<<<<<<<< * float64 x1, * float64 x2, */ static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_f_7pysteps_6motion_4_vet__linear_interpolation(__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x1, __pyx_t_7pysteps_6motion_4_vet_float64 
__pyx_v_x2, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y1, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y2) { __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_r; int __pyx_t_1; /* "pysteps/motion/_vet.pyx":39 * """ * * if float_abs(x1 - x2) < 1e-6: # <<<<<<<<<<<<<< * return y1 * */ __pyx_t_1 = ((__pyx_f_7pysteps_6motion_4_vet_float_abs((__pyx_v_x1 - __pyx_v_x2)) < 1e-6) != 0); if (__pyx_t_1) { /* "pysteps/motion/_vet.pyx":40 * * if float_abs(x1 - x2) < 1e-6: * return y1 # <<<<<<<<<<<<<< * * return y1 + (x - x1) * (y2 - y1) / (x2 - x1) */ __pyx_r = __pyx_v_y1; goto __pyx_L0; /* "pysteps/motion/_vet.pyx":39 * """ * * if float_abs(x1 - x2) < 1e-6: # <<<<<<<<<<<<<< * return y1 * */ } /* "pysteps/motion/_vet.pyx":42 * return y1 * * return y1 + (x - x1) * (y2 - y1) / (x2 - x1) # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ __pyx_r = (__pyx_v_y1 + (((__pyx_v_x - __pyx_v_x1) * (__pyx_v_y2 - __pyx_v_y1)) / (__pyx_v_x2 - __pyx_v_x1))); goto __pyx_L0; /* "pysteps/motion/_vet.pyx":29 * * @cython.cdivision(True) * cdef inline float64 _linear_interpolation(float64 x, # <<<<<<<<<<<<<< * float64 x1, * float64 x2, */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "pysteps/motion/_vet.pyx":45 * * @cython.cdivision(True) * cdef inline float64 _bilinear_interpolation(float64 x, # <<<<<<<<<<<<<< * float64 y, * float64 x1, */ static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_f_7pysteps_6motion_4_vet__bilinear_interpolation(__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x1, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x2, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y1, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y2, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_q11, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_q12, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_q21, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_q22) { 
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_f_x_y1; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_f_x_y2; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_r; /* "pysteps/motion/_vet.pyx":59 * cdef float64 f_x_y1, f_x_y2 * * f_x_y1 = _linear_interpolation(x, x1, x2, q11, q21) # <<<<<<<<<<<<<< * f_x_y2 = _linear_interpolation(x, x1, x2, q12, q22) * return _linear_interpolation(y, y1, y2, f_x_y1, f_x_y2) */ __pyx_v_f_x_y1 = __pyx_f_7pysteps_6motion_4_vet__linear_interpolation(__pyx_v_x, __pyx_v_x1, __pyx_v_x2, __pyx_v_q11, __pyx_v_q21); /* "pysteps/motion/_vet.pyx":60 * * f_x_y1 = _linear_interpolation(x, x1, x2, q11, q21) * f_x_y2 = _linear_interpolation(x, x1, x2, q12, q22) # <<<<<<<<<<<<<< * return _linear_interpolation(y, y1, y2, f_x_y1, f_x_y2) * */ __pyx_v_f_x_y2 = __pyx_f_7pysteps_6motion_4_vet__linear_interpolation(__pyx_v_x, __pyx_v_x1, __pyx_v_x2, __pyx_v_q12, __pyx_v_q22); /* "pysteps/motion/_vet.pyx":61 * f_x_y1 = _linear_interpolation(x, x1, x2, q11, q21) * f_x_y2 = _linear_interpolation(x, x1, x2, q12, q22) * return _linear_interpolation(y, y1, y2, f_x_y1, f_x_y2) # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ __pyx_r = __pyx_f_7pysteps_6motion_4_vet__linear_interpolation(__pyx_v_y, __pyx_v_y1, __pyx_v_y2, __pyx_v_f_x_y1, __pyx_v_f_x_y2); goto __pyx_L0; /* "pysteps/motion/_vet.pyx":45 * * @cython.cdivision(True) * cdef inline float64 _bilinear_interpolation(float64 x, # <<<<<<<<<<<<<< * float64 y, * float64 x1, */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "pysteps/motion/_vet.pyx":67 * @cython.nonecheck(False) * @cython.cdivision(True) * def _warp(np.ndarray[float64, ndim=2] image, # <<<<<<<<<<<<<< * np.ndarray[int8, ndim=2] mask, * np.ndarray[float64, ndim=3] displacement, */ /* Python wrapper */ static PyObject *__pyx_pw_7pysteps_6motion_4_vet_1_warp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_7pysteps_6motion_4_vet__warp[] = "\n Morph image by applying a displacement field 
(Warping).\n \n The new image is created by selecting for each position the values of the\n input image at the positions given by the x and y displacements. \n The routine works in a backward sense. \n The displacement vectors have to refer to their destination.\n \n For more information in Morphing functions see Section 3 in \n `Beezley and Mandel (2008)`_.\n \n Beezley, J. D., & Mandel, J. (2008). \n Morphing ensemble Kalman filters. Tellus A, 60(1), 131-140.\n \n .. _`Beezley and Mandel (2008)`: http://dx.doi.org/10.1111/ j.1600-0870.2007.00275.x\n\n \n The displacement field in x and y directions and the image must have the\n same dimensions.\n \n The morphing is executed in parallel over x axis.\n \n The value of displaced pixels that fall outside the limits takes the \n value of the nearest edge. Those pixels are indicated by values greater\n than 1 in the output mask.\n \n Parameters\n ----------\n \n image : ndarray (ndim = 2)\n Image to morph\n \n displacement : ndarray (ndim = 3)\n Displacement field to be applied (Warping). \n \n The dimensions are:\n displacement [ x (0) or y (1) , \n i index of pixel, j index of pixel ]\n\n gradient : bool, optional\n If True, the gradient of the morphing function is returned.\n\n\n Returns\n -------\n \n image : ndarray (float64 ,ndim = 2)\n Morphed image.\n \n mask : ndarray (int8 ,ndim = 2)\n Invalid values mask. 
Points outside the boundaries are masked.\n Values greater than 1, indicate masked values.\n\n gradient_values : ndarray (float64 ,ndim = 3), optional\n If gradient keyword is True, the gradient of the function is also\n returned.\n "; static PyMethodDef __pyx_mdef_7pysteps_6motion_4_vet_1_warp = {"_warp", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7pysteps_6motion_4_vet_1_warp, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7pysteps_6motion_4_vet__warp}; static PyObject *__pyx_pw_7pysteps_6motion_4_vet_1_warp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_image = 0; PyArrayObject *__pyx_v_mask = 0; PyArrayObject *__pyx_v_displacement = 0; int __pyx_v_gradient; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_warp (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_image,&__pyx_n_s_mask,&__pyx_n_s_displacement,&__pyx_n_s_gradient,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_image)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mask)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_warp", 0, 3, 4, 1); __PYX_ERR(0, 67, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_displacement)) != 0)) kw_args--; else { 
__Pyx_RaiseArgtupleInvalid("_warp", 0, 3, 4, 2); __PYX_ERR(0, 67, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_gradient); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_warp") < 0)) __PYX_ERR(0, 67, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_image = ((PyArrayObject *)values[0]); __pyx_v_mask = ((PyArrayObject *)values[1]); __pyx_v_displacement = ((PyArrayObject *)values[2]); if (values[3]) { __pyx_v_gradient = __Pyx_PyObject_IsTrue(values[3]); if (unlikely((__pyx_v_gradient == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 70, __pyx_L3_error) } else { /* "pysteps/motion/_vet.pyx":70 * np.ndarray[int8, ndim=2] mask, * np.ndarray[float64, ndim=3] displacement, * bint gradient=False): # <<<<<<<<<<<<<< * """ * Morph image by applying a displacement field (Warping). 
*/ __pyx_v_gradient = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_warp", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 67, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("pysteps.motion._vet._warp", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_image), __pyx_ptype_5numpy_ndarray, 1, "image", 0))) __PYX_ERR(0, 67, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mask), __pyx_ptype_5numpy_ndarray, 1, "mask", 0))) __PYX_ERR(0, 68, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_displacement), __pyx_ptype_5numpy_ndarray, 1, "displacement", 0))) __PYX_ERR(0, 69, __pyx_L1_error) __pyx_r = __pyx_pf_7pysteps_6motion_4_vet__warp(__pyx_self, __pyx_v_image, __pyx_v_mask, __pyx_v_displacement, __pyx_v_gradient); /* "pysteps/motion/_vet.pyx":67 * @cython.nonecheck(False) * @cython.cdivision(True) * def _warp(np.ndarray[float64, ndim=2] image, # <<<<<<<<<<<<<< * np.ndarray[int8, ndim=2] mask, * np.ndarray[float64, ndim=3] displacement, */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_7pysteps_6motion_4_vet__warp(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_image, PyArrayObject *__pyx_v_mask, PyArrayObject *__pyx_v_displacement, int __pyx_v_gradient) { __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_nx; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_ny; PyArrayObject *__pyx_v_new_image = 0; PyArrayObject *__pyx_v_morphed_mask = 0; PyArrayObject *__pyx_v_gradient_values = 0; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x_max_int; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y_max_int; __pyx_t_7pysteps_6motion_4_vet_float64 
__pyx_v_x_max_float; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y_max_float; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x_float; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y_float; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_dx; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_dy; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x_floor; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x_ceil; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y_floor; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y_ceil; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_f00; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_f10; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_f01; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_f11; __Pyx_LocalBuf_ND __pyx_pybuffernd_displacement; __Pyx_Buffer __pyx_pybuffer_displacement; __Pyx_LocalBuf_ND __pyx_pybuffernd_gradient_values; __Pyx_Buffer __pyx_pybuffer_gradient_values; __Pyx_LocalBuf_ND __pyx_pybuffernd_image; __Pyx_Buffer __pyx_pybuffer_image; __Pyx_LocalBuf_ND __pyx_pybuffernd_mask; __Pyx_Buffer __pyx_pybuffer_mask; __Pyx_LocalBuf_ND __pyx_pybuffernd_morphed_mask; __Pyx_Buffer __pyx_pybuffer_morphed_mask; __Pyx_LocalBuf_ND __pyx_pybuffernd_new_image; __Pyx_Buffer __pyx_pybuffer_new_image; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; PyArrayObject *__pyx_t_7 = NULL; PyArrayObject *__pyx_t_8 = NULL; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_9; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_10; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_11; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_12; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_13; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; Py_ssize_t __pyx_t_18; Py_ssize_t __pyx_t_19; Py_ssize_t __pyx_t_20; int __pyx_t_21; Py_ssize_t __pyx_t_22; Py_ssize_t 
__pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; Py_ssize_t __pyx_t_26; Py_ssize_t __pyx_t_27; Py_ssize_t __pyx_t_28; Py_ssize_t __pyx_t_29; Py_ssize_t __pyx_t_30; Py_ssize_t __pyx_t_31; Py_ssize_t __pyx_t_32; Py_ssize_t __pyx_t_33; Py_ssize_t __pyx_t_34; Py_ssize_t __pyx_t_35; Py_ssize_t __pyx_t_36; Py_ssize_t __pyx_t_37; Py_ssize_t __pyx_t_38; Py_ssize_t __pyx_t_39; Py_ssize_t __pyx_t_40; Py_ssize_t __pyx_t_41; Py_ssize_t __pyx_t_42; Py_ssize_t __pyx_t_43; Py_ssize_t __pyx_t_44; Py_ssize_t __pyx_t_45; Py_ssize_t __pyx_t_46; Py_ssize_t __pyx_t_47; Py_ssize_t __pyx_t_48; Py_ssize_t __pyx_t_49; Py_ssize_t __pyx_t_50; Py_ssize_t __pyx_t_51; Py_ssize_t __pyx_t_52; Py_ssize_t __pyx_t_53; Py_ssize_t __pyx_t_54; Py_ssize_t __pyx_t_55; Py_ssize_t __pyx_t_56; Py_ssize_t __pyx_t_57; Py_ssize_t __pyx_t_58; Py_ssize_t __pyx_t_59; Py_ssize_t __pyx_t_60; Py_ssize_t __pyx_t_61; Py_ssize_t __pyx_t_62; Py_ssize_t __pyx_t_63; Py_ssize_t __pyx_t_64; Py_ssize_t __pyx_t_65; Py_ssize_t __pyx_t_66; Py_ssize_t __pyx_t_67; Py_ssize_t __pyx_t_68; Py_ssize_t __pyx_t_69; Py_ssize_t __pyx_t_70; Py_ssize_t __pyx_t_71; Py_ssize_t __pyx_t_72; Py_ssize_t __pyx_t_73; Py_ssize_t __pyx_t_74; Py_ssize_t __pyx_t_75; __Pyx_RefNannySetupContext("_warp", 0); __pyx_pybuffer_new_image.pybuffer.buf = NULL; __pyx_pybuffer_new_image.refcount = 0; __pyx_pybuffernd_new_image.data = NULL; __pyx_pybuffernd_new_image.rcbuffer = &__pyx_pybuffer_new_image; __pyx_pybuffer_morphed_mask.pybuffer.buf = NULL; __pyx_pybuffer_morphed_mask.refcount = 0; __pyx_pybuffernd_morphed_mask.data = NULL; __pyx_pybuffernd_morphed_mask.rcbuffer = &__pyx_pybuffer_morphed_mask; __pyx_pybuffer_gradient_values.pybuffer.buf = NULL; __pyx_pybuffer_gradient_values.refcount = 0; __pyx_pybuffernd_gradient_values.data = NULL; __pyx_pybuffernd_gradient_values.rcbuffer = &__pyx_pybuffer_gradient_values; __pyx_pybuffer_image.pybuffer.buf = NULL; __pyx_pybuffer_image.refcount = 0; __pyx_pybuffernd_image.data = NULL; 
__pyx_pybuffernd_image.rcbuffer = &__pyx_pybuffer_image; __pyx_pybuffer_mask.pybuffer.buf = NULL; __pyx_pybuffer_mask.refcount = 0; __pyx_pybuffernd_mask.data = NULL; __pyx_pybuffernd_mask.rcbuffer = &__pyx_pybuffer_mask; __pyx_pybuffer_displacement.pybuffer.buf = NULL; __pyx_pybuffer_displacement.refcount = 0; __pyx_pybuffernd_displacement.data = NULL; __pyx_pybuffernd_displacement.rcbuffer = &__pyx_pybuffer_displacement; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_image.rcbuffer->pybuffer, (PyObject*)__pyx_v_image, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 67, __pyx_L1_error) } __pyx_pybuffernd_image.diminfo[0].strides = __pyx_pybuffernd_image.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_image.diminfo[0].shape = __pyx_pybuffernd_image.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_image.diminfo[1].strides = __pyx_pybuffernd_image.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_image.diminfo[1].shape = __pyx_pybuffernd_image.rcbuffer->pybuffer.shape[1]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_mask.rcbuffer->pybuffer, (PyObject*)__pyx_v_mask, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 67, __pyx_L1_error) } __pyx_pybuffernd_mask.diminfo[0].strides = __pyx_pybuffernd_mask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_mask.diminfo[0].shape = __pyx_pybuffernd_mask.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_mask.diminfo[1].strides = __pyx_pybuffernd_mask.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_mask.diminfo[1].shape = __pyx_pybuffernd_mask.rcbuffer->pybuffer.shape[1]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_displacement.rcbuffer->pybuffer, (PyObject*)__pyx_v_displacement, 
&__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 67, __pyx_L1_error) } __pyx_pybuffernd_displacement.diminfo[0].strides = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_displacement.diminfo[0].shape = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_displacement.diminfo[1].strides = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_displacement.diminfo[1].shape = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_displacement.diminfo[2].strides = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_displacement.diminfo[2].shape = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.shape[2]; /* "pysteps/motion/_vet.pyx":130 * """ * * cdef intp nx = <intp> image.shape[0] # <<<<<<<<<<<<<< * cdef intp ny = <intp> image.shape[1] * */ __pyx_v_nx = ((__pyx_t_7pysteps_6motion_4_vet_intp)(__pyx_v_image->dimensions[0])); /* "pysteps/motion/_vet.pyx":131 * * cdef intp nx = <intp> image.shape[0] * cdef intp ny = <intp> image.shape[1] # <<<<<<<<<<<<<< * * cdef np.ndarray[float64, ndim = 2] new_image = ( */ __pyx_v_ny = ((__pyx_t_7pysteps_6motion_4_vet_intp)(__pyx_v_image->dimensions[1])); /* "pysteps/motion/_vet.pyx":134 * * cdef np.ndarray[float64, ndim = 2] new_image = ( * np.zeros([nx, ny], dtype=np.float64)) # <<<<<<<<<<<<<< * * cdef np.ndarray[int8, ndim = 2] morphed_mask = ( */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_nx); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = 
__Pyx_PyInt_From_Py_intptr_t(__pyx_v_ny); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyList_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 134, __pyx_L1_error) __pyx_t_6 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_new_image.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { __pyx_v_new_image = ((PyArrayObject *)Py_None); 
__Pyx_INCREF(Py_None); __pyx_pybuffernd_new_image.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 133, __pyx_L1_error) } else {__pyx_pybuffernd_new_image.diminfo[0].strides = __pyx_pybuffernd_new_image.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_new_image.diminfo[0].shape = __pyx_pybuffernd_new_image.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_new_image.diminfo[1].strides = __pyx_pybuffernd_new_image.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_new_image.diminfo[1].shape = __pyx_pybuffernd_new_image.rcbuffer->pybuffer.shape[1]; } } __pyx_t_6 = 0; __pyx_v_new_image = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "pysteps/motion/_vet.pyx":137 * * cdef np.ndarray[int8, ndim = 2] morphed_mask = ( * np.zeros([nx, ny], dtype=np.int8)) # <<<<<<<<<<<<<< * * morphed_mask[mask > 0] = 1.0 */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_nx); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_ny); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyList_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); __pyx_t_5 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 137, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_2); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_int8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_t_1) < 0) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 137, __pyx_L1_error) __pyx_t_7 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { __pyx_v_morphed_mask = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 136, __pyx_L1_error) } else {__pyx_pybuffernd_morphed_mask.diminfo[0].strides = __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_morphed_mask.diminfo[0].shape = __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_morphed_mask.diminfo[1].strides = __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_morphed_mask.diminfo[1].shape = __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.shape[1]; } } __pyx_t_7 = 0; __pyx_v_morphed_mask = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "pysteps/motion/_vet.pyx":139 * np.zeros([nx, ny], 
dtype=np.int8)) * * morphed_mask[mask > 0] = 1.0 # <<<<<<<<<<<<<< * * cdef np.ndarray[float64, ndim = 3] gradient_values = ( */ __pyx_t_1 = PyObject_RichCompare(((PyObject *)__pyx_v_mask), __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 139, __pyx_L1_error) if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_morphed_mask), __pyx_t_1, __pyx_float_1_0) < 0)) __PYX_ERR(0, 139, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "pysteps/motion/_vet.pyx":142 * * cdef np.ndarray[float64, ndim = 3] gradient_values = ( * np.zeros([2, nx, ny], dtype=np.float64)) # <<<<<<<<<<<<<< * * cdef intp x, y */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_nx); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_ny); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyList_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_int_2); __Pyx_GIVEREF(__pyx_int_2); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_int_2); __Pyx_GIVEREF(__pyx_t_1); PyList_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_1, 
__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 142, __pyx_L1_error) __pyx_t_8 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_gradient_values.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) { __pyx_v_gradient_values = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 141, __pyx_L1_error) } else {__pyx_pybuffernd_gradient_values.diminfo[0].strides = __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_gradient_values.diminfo[0].shape = __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_gradient_values.diminfo[1].strides = __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_gradient_values.diminfo[1].shape = __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_gradient_values.diminfo[2].strides = __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_gradient_values.diminfo[2].shape = 
__pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.shape[2]; } } __pyx_t_8 = 0; __pyx_v_gradient_values = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "pysteps/motion/_vet.pyx":146 * cdef intp x, y * * cdef intp x_max_int = nx - 1 # <<<<<<<<<<<<<< * cdef intp y_max_int = ny - 1 * */ __pyx_v_x_max_int = (__pyx_v_nx - 1); /* "pysteps/motion/_vet.pyx":147 * * cdef intp x_max_int = nx - 1 * cdef intp y_max_int = ny - 1 # <<<<<<<<<<<<<< * * cdef float64 x_max_float = <float64> x_max_int */ __pyx_v_y_max_int = (__pyx_v_ny - 1); /* "pysteps/motion/_vet.pyx":149 * cdef intp y_max_int = ny - 1 * * cdef float64 x_max_float = <float64> x_max_int # <<<<<<<<<<<<<< * cdef float64 y_max_float = <float64> y_max_int * */ __pyx_v_x_max_float = ((__pyx_t_7pysteps_6motion_4_vet_float64)__pyx_v_x_max_int); /* "pysteps/motion/_vet.pyx":150 * * cdef float64 x_max_float = <float64> x_max_int * cdef float64 y_max_float = <float64> y_max_int # <<<<<<<<<<<<<< * * cdef float64 x_float, y_float, dx, dy */ __pyx_v_y_max_float = ((__pyx_t_7pysteps_6motion_4_vet_float64)__pyx_v_y_max_int); /* "pysteps/motion/_vet.pyx":161 * cdef float64 f00, f10, f01, f11 * * for x in prange(nx, schedule='dynamic', nogil=True): # <<<<<<<<<<<<<< * * for y in range(ny): */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_9 = __pyx_v_nx; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_11 = (__pyx_t_9 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_11 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25, __pyx_t_26, __pyx_t_27, __pyx_t_28, __pyx_t_29, __pyx_t_30, __pyx_t_31, __pyx_t_32, __pyx_t_33, 
__pyx_t_34, __pyx_t_35, __pyx_t_36, __pyx_t_37, __pyx_t_38, __pyx_t_39, __pyx_t_40, __pyx_t_41, __pyx_t_42, __pyx_t_43, __pyx_t_44, __pyx_t_45, __pyx_t_46, __pyx_t_47, __pyx_t_48, __pyx_t_49, __pyx_t_50, __pyx_t_51, __pyx_t_52, __pyx_t_53, __pyx_t_54, __pyx_t_55, __pyx_t_56, __pyx_t_57, __pyx_t_58, __pyx_t_59, __pyx_t_60, __pyx_t_61, __pyx_t_62, __pyx_t_63, __pyx_t_64, __pyx_t_65, __pyx_t_66, __pyx_t_67, __pyx_t_68, __pyx_t_69, __pyx_t_70, __pyx_t_71, __pyx_t_72, __pyx_t_73, __pyx_t_74, __pyx_t_75) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_dx) lastprivate(__pyx_v_dy) lastprivate(__pyx_v_f00) lastprivate(__pyx_v_f01) lastprivate(__pyx_v_f10) lastprivate(__pyx_v_f11) firstprivate(__pyx_v_x) lastprivate(__pyx_v_x) lastprivate(__pyx_v_x_ceil) lastprivate(__pyx_v_x_float) lastprivate(__pyx_v_x_floor) lastprivate(__pyx_v_y) lastprivate(__pyx_v_y_ceil) lastprivate(__pyx_v_y_float) lastprivate(__pyx_v_y_floor) schedule(dynamic) #endif /* _OPENMP */ for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_11; __pyx_t_10++){ { __pyx_v_x = (__pyx_t_7pysteps_6motion_4_vet_intp)(0 + 1 * __pyx_t_10); /* Initialize private variables to invalid values */ __pyx_v_dx = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN()); __pyx_v_dy = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN()); __pyx_v_f00 = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN()); __pyx_v_f01 = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN()); __pyx_v_f10 = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN()); __pyx_v_f11 = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN()); __pyx_v_x_ceil = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); __pyx_v_x_float = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN()); __pyx_v_x_floor = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); __pyx_v_y = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); __pyx_v_y_ceil = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); __pyx_v_y_float = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN()); 
__pyx_v_y_floor = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); /* "pysteps/motion/_vet.pyx":163 * for x in prange(nx, schedule='dynamic', nogil=True): * * for y in range(ny): # <<<<<<<<<<<<<< * * x_float = (<float64> x) - displacement[0, x, y] */ __pyx_t_12 = __pyx_v_ny; __pyx_t_13 = __pyx_t_12; for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) { __pyx_v_y = __pyx_t_14; /* "pysteps/motion/_vet.pyx":165 * for y in range(ny): * * x_float = (<float64> x) - displacement[0, x, y] # <<<<<<<<<<<<<< * y_float = (<float64> y) - displacement[1, x, y] * */ __pyx_t_15 = 0; __pyx_t_16 = __pyx_v_x; __pyx_t_17 = __pyx_v_y; __pyx_v_x_float = (((__pyx_t_7pysteps_6motion_4_vet_float64)__pyx_v_x) - (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_displacement.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_displacement.diminfo[0].strides, __pyx_t_16, __pyx_pybuffernd_displacement.diminfo[1].strides, __pyx_t_17, __pyx_pybuffernd_displacement.diminfo[2].strides))); /* "pysteps/motion/_vet.pyx":166 * * x_float = (<float64> x) - displacement[0, x, y] * y_float = (<float64> y) - displacement[1, x, y] # <<<<<<<<<<<<<< * * if x_float < 0: */ __pyx_t_18 = 1; __pyx_t_19 = __pyx_v_x; __pyx_t_20 = __pyx_v_y; __pyx_v_y_float = (((__pyx_t_7pysteps_6motion_4_vet_float64)__pyx_v_y) - (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_displacement.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_displacement.diminfo[0].strides, __pyx_t_19, __pyx_pybuffernd_displacement.diminfo[1].strides, __pyx_t_20, __pyx_pybuffernd_displacement.diminfo[2].strides))); /* "pysteps/motion/_vet.pyx":168 * y_float = (<float64> y) - displacement[1, x, y] * * if x_float < 0: # <<<<<<<<<<<<<< * morphed_mask[x, y] = 1 * x_float = 0 */ __pyx_t_21 = ((__pyx_v_x_float < 0.0) != 0); if (__pyx_t_21) { /* "pysteps/motion/_vet.pyx":169 * * if x_float < 0: * morphed_mask[x, y] = 1 # <<<<<<<<<<<<<< * x_float = 0 * x_floor = 0 */ __pyx_t_22 = 
__pyx_v_x; __pyx_t_23 = __pyx_v_y; *__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_morphed_mask.diminfo[0].strides, __pyx_t_23, __pyx_pybuffernd_morphed_mask.diminfo[1].strides) = 1; /* "pysteps/motion/_vet.pyx":170 * if x_float < 0: * morphed_mask[x, y] = 1 * x_float = 0 # <<<<<<<<<<<<<< * x_floor = 0 * x_ceil = 0 */ __pyx_v_x_float = 0.0; /* "pysteps/motion/_vet.pyx":171 * morphed_mask[x, y] = 1 * x_float = 0 * x_floor = 0 # <<<<<<<<<<<<<< * x_ceil = 0 * */ __pyx_v_x_floor = 0; /* "pysteps/motion/_vet.pyx":172 * x_float = 0 * x_floor = 0 * x_ceil = 0 # <<<<<<<<<<<<<< * * elif x_float > x_max_float: */ __pyx_v_x_ceil = 0; /* "pysteps/motion/_vet.pyx":168 * y_float = (<float64> y) - displacement[1, x, y] * * if x_float < 0: # <<<<<<<<<<<<<< * morphed_mask[x, y] = 1 * x_float = 0 */ goto __pyx_L12; } /* "pysteps/motion/_vet.pyx":174 * x_ceil = 0 * * elif x_float > x_max_float: # <<<<<<<<<<<<<< * morphed_mask[x, y] = 1 * x_float = x_max_float */ __pyx_t_21 = ((__pyx_v_x_float > __pyx_v_x_max_float) != 0); if (__pyx_t_21) { /* "pysteps/motion/_vet.pyx":175 * * elif x_float > x_max_float: * morphed_mask[x, y] = 1 # <<<<<<<<<<<<<< * x_float = x_max_float * x_floor = x_max_int */ __pyx_t_24 = __pyx_v_x; __pyx_t_25 = __pyx_v_y; *__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.buf, __pyx_t_24, __pyx_pybuffernd_morphed_mask.diminfo[0].strides, __pyx_t_25, __pyx_pybuffernd_morphed_mask.diminfo[1].strides) = 1; /* "pysteps/motion/_vet.pyx":176 * elif x_float > x_max_float: * morphed_mask[x, y] = 1 * x_float = x_max_float # <<<<<<<<<<<<<< * x_floor = x_max_int * x_ceil = x_max_int */ __pyx_v_x_float = __pyx_v_x_max_float; /* "pysteps/motion/_vet.pyx":177 * morphed_mask[x, y] = 1 * x_float = x_max_float * x_floor = x_max_int # <<<<<<<<<<<<<< * x_ceil = x_max_int * */ __pyx_v_x_floor = __pyx_v_x_max_int; /* 
"pysteps/motion/_vet.pyx":178 * x_float = x_max_float * x_floor = x_max_int * x_ceil = x_max_int # <<<<<<<<<<<<<< * * else: */ __pyx_v_x_ceil = __pyx_v_x_max_int; /* "pysteps/motion/_vet.pyx":174 * x_ceil = 0 * * elif x_float > x_max_float: # <<<<<<<<<<<<<< * morphed_mask[x, y] = 1 * x_float = x_max_float */ goto __pyx_L12; } /* "pysteps/motion/_vet.pyx":181 * * else: * x_floor = <intp> floor(x_float) # <<<<<<<<<<<<<< * x_ceil = x_floor + 1 * if x_ceil > x_max_int: */ /*else*/ { __pyx_v_x_floor = ((__pyx_t_7pysteps_6motion_4_vet_intp)floor(__pyx_v_x_float)); /* "pysteps/motion/_vet.pyx":182 * else: * x_floor = <intp> floor(x_float) * x_ceil = x_floor + 1 # <<<<<<<<<<<<<< * if x_ceil > x_max_int: * x_ceil = x_max_int */ __pyx_v_x_ceil = (__pyx_v_x_floor + 1); /* "pysteps/motion/_vet.pyx":183 * x_floor = <intp> floor(x_float) * x_ceil = x_floor + 1 * if x_ceil > x_max_int: # <<<<<<<<<<<<<< * x_ceil = x_max_int * */ __pyx_t_21 = ((__pyx_v_x_ceil > __pyx_v_x_max_int) != 0); if (__pyx_t_21) { /* "pysteps/motion/_vet.pyx":184 * x_ceil = x_floor + 1 * if x_ceil > x_max_int: * x_ceil = x_max_int # <<<<<<<<<<<<<< * * if y_float < 0: */ __pyx_v_x_ceil = __pyx_v_x_max_int; /* "pysteps/motion/_vet.pyx":183 * x_floor = <intp> floor(x_float) * x_ceil = x_floor + 1 * if x_ceil > x_max_int: # <<<<<<<<<<<<<< * x_ceil = x_max_int * */ } } __pyx_L12:; /* "pysteps/motion/_vet.pyx":186 * x_ceil = x_max_int * * if y_float < 0: # <<<<<<<<<<<<<< * morphed_mask[x, y] = 1 * y_float = 0 */ __pyx_t_21 = ((__pyx_v_y_float < 0.0) != 0); if (__pyx_t_21) { /* "pysteps/motion/_vet.pyx":187 * * if y_float < 0: * morphed_mask[x, y] = 1 # <<<<<<<<<<<<<< * y_float = 0 * y_floor = 0 */ __pyx_t_26 = __pyx_v_x; __pyx_t_27 = __pyx_v_y; *__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.buf, __pyx_t_26, __pyx_pybuffernd_morphed_mask.diminfo[0].strides, __pyx_t_27, __pyx_pybuffernd_morphed_mask.diminfo[1].strides) = 1; /* 
"pysteps/motion/_vet.pyx":188 * if y_float < 0: * morphed_mask[x, y] = 1 * y_float = 0 # <<<<<<<<<<<<<< * y_floor = 0 * y_ceil = 0 */ __pyx_v_y_float = 0.0; /* "pysteps/motion/_vet.pyx":189 * morphed_mask[x, y] = 1 * y_float = 0 * y_floor = 0 # <<<<<<<<<<<<<< * y_ceil = 0 * elif y_float > y_max_float: */ __pyx_v_y_floor = 0; /* "pysteps/motion/_vet.pyx":190 * y_float = 0 * y_floor = 0 * y_ceil = 0 # <<<<<<<<<<<<<< * elif y_float > y_max_float: * morphed_mask[x, y] = 1 */ __pyx_v_y_ceil = 0; /* "pysteps/motion/_vet.pyx":186 * x_ceil = x_max_int * * if y_float < 0: # <<<<<<<<<<<<<< * morphed_mask[x, y] = 1 * y_float = 0 */ goto __pyx_L14; } /* "pysteps/motion/_vet.pyx":191 * y_floor = 0 * y_ceil = 0 * elif y_float > y_max_float: # <<<<<<<<<<<<<< * morphed_mask[x, y] = 1 * y_float = y_max_float */ __pyx_t_21 = ((__pyx_v_y_float > __pyx_v_y_max_float) != 0); if (__pyx_t_21) { /* "pysteps/motion/_vet.pyx":192 * y_ceil = 0 * elif y_float > y_max_float: * morphed_mask[x, y] = 1 # <<<<<<<<<<<<<< * y_float = y_max_float * y_floor = y_max_int */ __pyx_t_28 = __pyx_v_x; __pyx_t_29 = __pyx_v_y; *__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.buf, __pyx_t_28, __pyx_pybuffernd_morphed_mask.diminfo[0].strides, __pyx_t_29, __pyx_pybuffernd_morphed_mask.diminfo[1].strides) = 1; /* "pysteps/motion/_vet.pyx":193 * elif y_float > y_max_float: * morphed_mask[x, y] = 1 * y_float = y_max_float # <<<<<<<<<<<<<< * y_floor = y_max_int * y_ceil = y_max_int */ __pyx_v_y_float = __pyx_v_y_max_float; /* "pysteps/motion/_vet.pyx":194 * morphed_mask[x, y] = 1 * y_float = y_max_float * y_floor = y_max_int # <<<<<<<<<<<<<< * y_ceil = y_max_int * else: */ __pyx_v_y_floor = __pyx_v_y_max_int; /* "pysteps/motion/_vet.pyx":195 * y_float = y_max_float * y_floor = y_max_int * y_ceil = y_max_int # <<<<<<<<<<<<<< * else: * y_floor = <intp> floor(y_float) */ __pyx_v_y_ceil = __pyx_v_y_max_int; /* "pysteps/motion/_vet.pyx":191 * y_floor = 0 * 
y_ceil = 0 * elif y_float > y_max_float: # <<<<<<<<<<<<<< * morphed_mask[x, y] = 1 * y_float = y_max_float */ goto __pyx_L14; } /* "pysteps/motion/_vet.pyx":197 * y_ceil = y_max_int * else: * y_floor = <intp> floor(y_float) # <<<<<<<<<<<<<< * y_ceil = y_floor + 1 * if y_ceil > y_max_int: */ /*else*/ { __pyx_v_y_floor = ((__pyx_t_7pysteps_6motion_4_vet_intp)floor(__pyx_v_y_float)); /* "pysteps/motion/_vet.pyx":198 * else: * y_floor = <intp> floor(y_float) * y_ceil = y_floor + 1 # <<<<<<<<<<<<<< * if y_ceil > y_max_int: * y_ceil = y_max_int */ __pyx_v_y_ceil = (__pyx_v_y_floor + 1); /* "pysteps/motion/_vet.pyx":199 * y_floor = <intp> floor(y_float) * y_ceil = y_floor + 1 * if y_ceil > y_max_int: # <<<<<<<<<<<<<< * y_ceil = y_max_int * */ __pyx_t_21 = ((__pyx_v_y_ceil > __pyx_v_y_max_int) != 0); if (__pyx_t_21) { /* "pysteps/motion/_vet.pyx":200 * y_ceil = y_floor + 1 * if y_ceil > y_max_int: * y_ceil = y_max_int # <<<<<<<<<<<<<< * * dx = x_float - <float64> x_floor */ __pyx_v_y_ceil = __pyx_v_y_max_int; /* "pysteps/motion/_vet.pyx":199 * y_floor = <intp> floor(y_float) * y_ceil = y_floor + 1 * if y_ceil > y_max_int: # <<<<<<<<<<<<<< * y_ceil = y_max_int * */ } } __pyx_L14:; /* "pysteps/motion/_vet.pyx":202 * y_ceil = y_max_int * * dx = x_float - <float64> x_floor # <<<<<<<<<<<<<< * dy = y_float - <float64> y_floor * */ __pyx_v_dx = (__pyx_v_x_float - ((__pyx_t_7pysteps_6motion_4_vet_float64)__pyx_v_x_floor)); /* "pysteps/motion/_vet.pyx":203 * * dx = x_float - <float64> x_floor * dy = y_float - <float64> y_floor # <<<<<<<<<<<<<< * * # This assumes that the spacing between grid points=1. 
*/ __pyx_v_dy = (__pyx_v_y_float - ((__pyx_t_7pysteps_6motion_4_vet_float64)__pyx_v_y_floor)); /* "pysteps/motion/_vet.pyx":208 * * # Bilinear interpolation coeficients * f00 = image[x_floor, y_floor] # <<<<<<<<<<<<<< * f10 = image[x_ceil, y_floor] - image[x_floor, y_floor] * f01 = image[x_floor, y_ceil] - image[x_floor, y_floor] */ __pyx_t_30 = __pyx_v_x_floor; __pyx_t_31 = __pyx_v_y_floor; __pyx_v_f00 = (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_30, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_31, __pyx_pybuffernd_image.diminfo[1].strides)); /* "pysteps/motion/_vet.pyx":209 * # Bilinear interpolation coeficients * f00 = image[x_floor, y_floor] * f10 = image[x_ceil, y_floor] - image[x_floor, y_floor] # <<<<<<<<<<<<<< * f01 = image[x_floor, y_ceil] - image[x_floor, y_floor] * f11 = (image[x_floor, y_floor] - image[x_ceil, y_floor] */ __pyx_t_32 = __pyx_v_x_ceil; __pyx_t_33 = __pyx_v_y_floor; __pyx_t_34 = __pyx_v_x_floor; __pyx_t_35 = __pyx_v_y_floor; __pyx_v_f10 = ((*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_32, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_33, __pyx_pybuffernd_image.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_34, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_35, __pyx_pybuffernd_image.diminfo[1].strides))); /* "pysteps/motion/_vet.pyx":210 * f00 = image[x_floor, y_floor] * f10 = image[x_ceil, y_floor] - image[x_floor, y_floor] * f01 = image[x_floor, y_ceil] - image[x_floor, y_floor] # <<<<<<<<<<<<<< * f11 = (image[x_floor, y_floor] - image[x_ceil, y_floor] * - image[x_floor, y_ceil] + image[x_ceil, y_ceil]) */ __pyx_t_36 = __pyx_v_x_floor; __pyx_t_37 = __pyx_v_y_ceil; __pyx_t_38 = __pyx_v_x_floor; __pyx_t_39 = __pyx_v_y_floor; __pyx_v_f01 = 
((*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_36, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_37, __pyx_pybuffernd_image.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_38, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_39, __pyx_pybuffernd_image.diminfo[1].strides))); /* "pysteps/motion/_vet.pyx":211 * f10 = image[x_ceil, y_floor] - image[x_floor, y_floor] * f01 = image[x_floor, y_ceil] - image[x_floor, y_floor] * f11 = (image[x_floor, y_floor] - image[x_ceil, y_floor] # <<<<<<<<<<<<<< * - image[x_floor, y_ceil] + image[x_ceil, y_ceil]) * */ __pyx_t_40 = __pyx_v_x_floor; __pyx_t_41 = __pyx_v_y_floor; __pyx_t_42 = __pyx_v_x_ceil; __pyx_t_43 = __pyx_v_y_floor; /* "pysteps/motion/_vet.pyx":212 * f01 = image[x_floor, y_ceil] - image[x_floor, y_floor] * f11 = (image[x_floor, y_floor] - image[x_ceil, y_floor] * - image[x_floor, y_ceil] + image[x_ceil, y_ceil]) # <<<<<<<<<<<<<< * * # Bilinear interpolation */ __pyx_t_44 = __pyx_v_x_floor; __pyx_t_45 = __pyx_v_y_ceil; __pyx_t_46 = __pyx_v_x_ceil; __pyx_t_47 = __pyx_v_y_ceil; __pyx_v_f11 = ((((*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_40, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_41, __pyx_pybuffernd_image.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_42, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_43, __pyx_pybuffernd_image.diminfo[1].strides))) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_44, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_45, __pyx_pybuffernd_image.diminfo[1].strides))) + (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, 
__pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_46, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_47, __pyx_pybuffernd_image.diminfo[1].strides))); /* "pysteps/motion/_vet.pyx":215 * * # Bilinear interpolation * new_image[x, y] = f00 + dx * f10 + dy * f01 + dx * dy * f11 # <<<<<<<<<<<<<< * * if gradient: */ __pyx_t_48 = __pyx_v_x; __pyx_t_49 = __pyx_v_y; *__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_new_image.rcbuffer->pybuffer.buf, __pyx_t_48, __pyx_pybuffernd_new_image.diminfo[0].strides, __pyx_t_49, __pyx_pybuffernd_new_image.diminfo[1].strides) = (((__pyx_v_f00 + (__pyx_v_dx * __pyx_v_f10)) + (__pyx_v_dy * __pyx_v_f01)) + ((__pyx_v_dx * __pyx_v_dy) * __pyx_v_f11)); /* "pysteps/motion/_vet.pyx":217 * new_image[x, y] = f00 + dx * f10 + dy * f01 + dx * dy * f11 * * if gradient: # <<<<<<<<<<<<<< * gradient_values[0, x, y] = f10 + dy * f11 * gradient_values[1, x, y] = f01 + dx * f11 */ __pyx_t_21 = (__pyx_v_gradient != 0); if (__pyx_t_21) { /* "pysteps/motion/_vet.pyx":218 * * if gradient: * gradient_values[0, x, y] = f10 + dy * f11 # <<<<<<<<<<<<<< * gradient_values[1, x, y] = f01 + dx * f11 * */ __pyx_t_50 = 0; __pyx_t_51 = __pyx_v_x; __pyx_t_52 = __pyx_v_y; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.buf, __pyx_t_50, __pyx_pybuffernd_gradient_values.diminfo[0].strides, __pyx_t_51, __pyx_pybuffernd_gradient_values.diminfo[1].strides, __pyx_t_52, __pyx_pybuffernd_gradient_values.diminfo[2].strides) = (__pyx_v_f10 + (__pyx_v_dy * __pyx_v_f11)); /* "pysteps/motion/_vet.pyx":219 * if gradient: * gradient_values[0, x, y] = f10 + dy * f11 * gradient_values[1, x, y] = f01 + dx * f11 # <<<<<<<<<<<<<< * * f00 = mask[x_floor, y_floor] */ __pyx_t_53 = 1; __pyx_t_54 = __pyx_v_x; __pyx_t_55 = __pyx_v_y; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.buf, __pyx_t_53, 
__pyx_pybuffernd_gradient_values.diminfo[0].strides, __pyx_t_54, __pyx_pybuffernd_gradient_values.diminfo[1].strides, __pyx_t_55, __pyx_pybuffernd_gradient_values.diminfo[2].strides) = (__pyx_v_f01 + (__pyx_v_dx * __pyx_v_f11)); /* "pysteps/motion/_vet.pyx":217 * new_image[x, y] = f00 + dx * f10 + dy * f01 + dx * dy * f11 * * if gradient: # <<<<<<<<<<<<<< * gradient_values[0, x, y] = f10 + dy * f11 * gradient_values[1, x, y] = f01 + dx * f11 */ } /* "pysteps/motion/_vet.pyx":221 * gradient_values[1, x, y] = f01 + dx * f11 * * f00 = mask[x_floor, y_floor] # <<<<<<<<<<<<<< * f10 = mask[x_ceil, y_floor] - mask[x_floor, y_floor] * f01 = mask[x_floor, y_ceil] - mask[x_floor, y_floor] */ __pyx_t_56 = __pyx_v_x_floor; __pyx_t_57 = __pyx_v_y_floor; __pyx_v_f00 = (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_56, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_57, __pyx_pybuffernd_mask.diminfo[1].strides)); /* "pysteps/motion/_vet.pyx":222 * * f00 = mask[x_floor, y_floor] * f10 = mask[x_ceil, y_floor] - mask[x_floor, y_floor] # <<<<<<<<<<<<<< * f01 = mask[x_floor, y_ceil] - mask[x_floor, y_floor] * f11 = (mask[x_floor, y_floor] - mask[x_ceil, y_floor] */ __pyx_t_58 = __pyx_v_x_ceil; __pyx_t_59 = __pyx_v_y_floor; __pyx_t_60 = __pyx_v_x_floor; __pyx_t_61 = __pyx_v_y_floor; __pyx_v_f10 = ((*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_58, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_59, __pyx_pybuffernd_mask.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_60, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_61, __pyx_pybuffernd_mask.diminfo[1].strides))); /* "pysteps/motion/_vet.pyx":223 * f00 = mask[x_floor, y_floor] * f10 = mask[x_ceil, y_floor] - mask[x_floor, y_floor] * f01 = mask[x_floor, y_ceil] - mask[x_floor, y_floor] # 
<<<<<<<<<<<<<< * f11 = (mask[x_floor, y_floor] - mask[x_ceil, y_floor] * - mask[x_floor, y_ceil] + mask[x_ceil, y_ceil]) */ __pyx_t_62 = __pyx_v_x_floor; __pyx_t_63 = __pyx_v_y_ceil; __pyx_t_64 = __pyx_v_x_floor; __pyx_t_65 = __pyx_v_y_floor; __pyx_v_f01 = ((*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_62, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_63, __pyx_pybuffernd_mask.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_64, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_65, __pyx_pybuffernd_mask.diminfo[1].strides))); /* "pysteps/motion/_vet.pyx":224 * f10 = mask[x_ceil, y_floor] - mask[x_floor, y_floor] * f01 = mask[x_floor, y_ceil] - mask[x_floor, y_floor] * f11 = (mask[x_floor, y_floor] - mask[x_ceil, y_floor] # <<<<<<<<<<<<<< * - mask[x_floor, y_ceil] + mask[x_ceil, y_ceil]) * */ __pyx_t_66 = __pyx_v_x_floor; __pyx_t_67 = __pyx_v_y_floor; __pyx_t_68 = __pyx_v_x_ceil; __pyx_t_69 = __pyx_v_y_floor; /* "pysteps/motion/_vet.pyx":225 * f01 = mask[x_floor, y_ceil] - mask[x_floor, y_floor] * f11 = (mask[x_floor, y_floor] - mask[x_ceil, y_floor] * - mask[x_floor, y_ceil] + mask[x_ceil, y_ceil]) # <<<<<<<<<<<<<< * * morphed_mask[x, y] = <int8> (f00 + dx * f10 + dy * f01 */ __pyx_t_70 = __pyx_v_x_floor; __pyx_t_71 = __pyx_v_y_ceil; __pyx_t_72 = __pyx_v_x_ceil; __pyx_t_73 = __pyx_v_y_ceil; __pyx_v_f11 = ((((*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_66, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_67, __pyx_pybuffernd_mask.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_68, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_69, __pyx_pybuffernd_mask.diminfo[1].strides))) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, 
__pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_70, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_71, __pyx_pybuffernd_mask.diminfo[1].strides))) + (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_72, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_73, __pyx_pybuffernd_mask.diminfo[1].strides))); /* "pysteps/motion/_vet.pyx":227 * - mask[x_floor, y_ceil] + mask[x_ceil, y_ceil]) * * morphed_mask[x, y] = <int8> (f00 + dx * f10 + dy * f01 # <<<<<<<<<<<<<< * + dx * dy * f11) * */ __pyx_t_74 = __pyx_v_x; __pyx_t_75 = __pyx_v_y; *__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.buf, __pyx_t_74, __pyx_pybuffernd_morphed_mask.diminfo[0].strides, __pyx_t_75, __pyx_pybuffernd_morphed_mask.diminfo[1].strides) = ((__pyx_t_7pysteps_6motion_4_vet_int8)(((__pyx_v_f00 + (__pyx_v_dx * __pyx_v_f10)) + (__pyx_v_dy * __pyx_v_f01)) + ((__pyx_v_dx * __pyx_v_dy) * __pyx_v_f11))); } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "pysteps/motion/_vet.pyx":161 * cdef float64 f00, f10, f01, f11 * * for x in prange(nx, schedule='dynamic', nogil=True): # <<<<<<<<<<<<<< * * for y in range(ny): */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "pysteps/motion/_vet.pyx":230 * + dx * dy * f11) * * morphed_mask[morphed_mask != 0] = 1 # <<<<<<<<<<<<<< * if gradient: * return new_image, morphed_mask, gradient_values */ __pyx_t_5 = PyObject_RichCompare(((PyObject *)__pyx_v_morphed_mask), __pyx_int_0, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 230, __pyx_L1_error) if (unlikely(PyObject_SetItem(((PyObject 
*)__pyx_v_morphed_mask), __pyx_t_5, __pyx_int_1) < 0)) __PYX_ERR(0, 230, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "pysteps/motion/_vet.pyx":231 * * morphed_mask[morphed_mask != 0] = 1 * if gradient: # <<<<<<<<<<<<<< * return new_image, morphed_mask, gradient_values * else: */ __pyx_t_21 = (__pyx_v_gradient != 0); if (__pyx_t_21) { /* "pysteps/motion/_vet.pyx":232 * morphed_mask[morphed_mask != 0] = 1 * if gradient: * return new_image, morphed_mask, gradient_values # <<<<<<<<<<<<<< * else: * return new_image, morphed_mask */ __Pyx_XDECREF(__pyx_r); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 232, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_v_new_image)); __Pyx_GIVEREF(((PyObject *)__pyx_v_new_image)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_new_image)); __Pyx_INCREF(((PyObject *)__pyx_v_morphed_mask)); __Pyx_GIVEREF(((PyObject *)__pyx_v_morphed_mask)); PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_morphed_mask)); __Pyx_INCREF(((PyObject *)__pyx_v_gradient_values)); __Pyx_GIVEREF(((PyObject *)__pyx_v_gradient_values)); PyTuple_SET_ITEM(__pyx_t_5, 2, ((PyObject *)__pyx_v_gradient_values)); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "pysteps/motion/_vet.pyx":231 * * morphed_mask[morphed_mask != 0] = 1 * if gradient: # <<<<<<<<<<<<<< * return new_image, morphed_mask, gradient_values * else: */ } /* "pysteps/motion/_vet.pyx":234 * return new_image, morphed_mask, gradient_values * else: * return new_image, morphed_mask # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_v_new_image)); __Pyx_GIVEREF(((PyObject *)__pyx_v_new_image)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_new_image)); __Pyx_INCREF(((PyObject *)__pyx_v_morphed_mask)); __Pyx_GIVEREF(((PyObject *)__pyx_v_morphed_mask)); 
PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_morphed_mask)); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; } /* "pysteps/motion/_vet.pyx":67 * @cython.nonecheck(False) * @cython.cdivision(True) * def _warp(np.ndarray[float64, ndim=2] image, # <<<<<<<<<<<<<< * np.ndarray[int8, ndim=2] mask, * np.ndarray[float64, ndim=3] displacement, */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_displacement.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_gradient_values.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_image.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_new_image.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("pysteps.motion._vet._warp", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_displacement.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_gradient_values.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_image.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_new_image.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_new_image); __Pyx_XDECREF((PyObject *)__pyx_v_morphed_mask); __Pyx_XDECREF((PyObject *)__pyx_v_gradient_values); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* 
"pysteps/motion/_vet.pyx":240 * @cython.nonecheck(False) * @cython.cdivision(True) * def _cost_function(np.ndarray[float64, ndim=3] sector_displacement, # <<<<<<<<<<<<<< * np.ndarray[float64, ndim=2] template_image, * np.ndarray[float64, ndim=2] input_image, */ /* Python wrapper */ static PyObject *__pyx_pw_7pysteps_6motion_4_vet_3_cost_function(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_7pysteps_6motion_4_vet_2_cost_function[] = "\n Variational Echo Tracking Cost function.\n \n This function computes the Variational Echo Tracking (VET) \n Cost function presented by `Laroche and Zawazdki (1995)`_ and used in the \n McGill Algorithm for Prediction by Lagrangian Extrapolation (MAPLE) \n described in\n `Germann and Zawadzki (2002)`_.\n \n \n .. _`Laroche and Zawazdki (1995)`: http://dx.doi.org/10.1175/1520-0426(1995)012<0721:ROHWFS>2.0.CO;2\n \n .. _`Germann and Zawadzki (2002)`: http://dx.doi.org/10.1175/1520-0493(2002)130<2859:SDOTPO>2.0.CO;2\n \n \n The cost function is a the sum of the residuals of the squared image \n differences along with a smoothness constrain. 
\n \n This cost function implementation, supports displacement vector \n sectorization.\n The displacement vector represent the displacement applied to the pixels in\n each individual sector.\n \n This help to reduce the number of degrees of freedom of the cost function \n when hierarchical approaches are used to obtain the minima of \n the cost function (from low resolution to full image resolution).\n For example, in the MAPLE algorithm an Scaling Guess procedure is used to \n find the displacement vectors.\n The echo motion field is retrieved in three runs with increasing resolution.\n The retrieval starts with (left) a uniform field, which is used as a first \n guess to retrieve (middle) the field on a 5 \303\227 5 grid, which in turn is the \n first guess of (right) the final minimization with a 25 \303\227 25 grid\n \n The shape of the sector is deduced from the image shape and the displacement\n vector shape. \n \n IMPORTANT: The number of sectors in each dimension (x and y) must be a \n factor full image size.\n \n The value of displaced pixels that fall outside the limits takes the \n value of the nearest edge.\n \n The cost function is computed in parallel over the x axis.""\n \n .. _ndarray: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html\n \n Parameters\n ----------\n \n sector_displacement : ndarray_ (ndim=3) \n Array of displacements to apply to each sector. The dimensions are:\n sector_displacement [ x (0) or y (1) displacement, \n i index of sector, j index of sector ] \n \n \n template_image : ndarray_ (ndim=2)\n Input image array where the sector displacement is applied.\n \n input_image : ndarray_\n Image array to be used as reference \n \n smooth_gain : float\n Smoothness constrain gain\n\n mask : ndarray_ (ndim=2)\n Data mask. 
If is True, the data is marked as not valid and is not\n used in the computations.\n\n gradient : bool, optional\n If True, the gradient of the morphing function is returned.\n\n Returns\n -------\n \n penalty or gradient values.\n\n penalty : float\n Value of the cost function\n\n gradient_values : ndarray (float64 ,ndim = 3), optional\n If gradient keyword is True, the gradient of the function is also\n returned.\n \n \n References\n ----------\n \n Laroche, S., and I. Zawadzki, 1995: \n Retrievals of horizontal winds from single-Doppler clear-air data by methods\n of cross-correlation and variational analysis. \n J. Atmos. Oceanic Technol., 12, 721\342\200\223738.\n doi: http://dx.doi.org/10.1175/1520-0426(1995)012<0721:ROHWFS>2.0.CO;2\n \n Germann, U. and I. Zawadzki, 2002: \n Scale-Dependence of the Predictability of Precipitation from Continental \n Radar Images.\n Part I: Description of the Methodology. Mon. Wea. Rev., 130, 2859\342\200\2232873,\n doi: 10.1175/1520-0493(2002)130<2859:SDOTPO>2.0.CO;2. 
\n \n "; static PyMethodDef __pyx_mdef_7pysteps_6motion_4_vet_3_cost_function = {"_cost_function", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7pysteps_6motion_4_vet_3_cost_function, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7pysteps_6motion_4_vet_2_cost_function}; static PyObject *__pyx_pw_7pysteps_6motion_4_vet_3_cost_function(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_sector_displacement = 0; PyArrayObject *__pyx_v_template_image = 0; PyArrayObject *__pyx_v_input_image = 0; PyArrayObject *__pyx_v_mask = 0; float __pyx_v_smooth_gain; int __pyx_v_gradient; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_cost_function (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_sector_displacement,&__pyx_n_s_template_image,&__pyx_n_s_input_image,&__pyx_n_s_mask,&__pyx_n_s_smooth_gain,&__pyx_n_s_gradient,0}; PyObject* values[6] = {0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sector_displacement)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_template_image)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_cost_function", 0, 5, 6, 1); __PYX_ERR(0, 240, 
__pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_input_image)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_cost_function", 0, 5, 6, 2); __PYX_ERR(0, 240, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mask)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_cost_function", 0, 5, 6, 3); __PYX_ERR(0, 240, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_smooth_gain)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_cost_function", 0, 5, 6, 4); __PYX_ERR(0, 240, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_gradient); if (value) { values[5] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_cost_function") < 0)) __PYX_ERR(0, 240, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_sector_displacement = ((PyArrayObject *)values[0]); __pyx_v_template_image = ((PyArrayObject *)values[1]); __pyx_v_input_image = ((PyArrayObject *)values[2]); __pyx_v_mask = ((PyArrayObject *)values[3]); __pyx_v_smooth_gain = __pyx_PyFloat_AsFloat(values[4]); if (unlikely((__pyx_v_smooth_gain == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 244, __pyx_L3_error) if (values[5]) { __pyx_v_gradient = __Pyx_PyObject_IsTrue(values[5]); if (unlikely((__pyx_v_gradient == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 245, 
__pyx_L3_error) } else { /* "pysteps/motion/_vet.pyx":245 * np.ndarray[int8, ndim=2] mask, * float smooth_gain, * bint gradient = False): # <<<<<<<<<<<<<< * """ * Variational Echo Tracking Cost function. */ __pyx_v_gradient = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_cost_function", 0, 5, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 240, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("pysteps.motion._vet._cost_function", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_sector_displacement), __pyx_ptype_5numpy_ndarray, 1, "sector_displacement", 0))) __PYX_ERR(0, 240, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_template_image), __pyx_ptype_5numpy_ndarray, 1, "template_image", 0))) __PYX_ERR(0, 241, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_input_image), __pyx_ptype_5numpy_ndarray, 1, "input_image", 0))) __PYX_ERR(0, 242, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mask), __pyx_ptype_5numpy_ndarray, 1, "mask", 0))) __PYX_ERR(0, 243, __pyx_L1_error) __pyx_r = __pyx_pf_7pysteps_6motion_4_vet_2_cost_function(__pyx_self, __pyx_v_sector_displacement, __pyx_v_template_image, __pyx_v_input_image, __pyx_v_mask, __pyx_v_smooth_gain, __pyx_v_gradient); /* "pysteps/motion/_vet.pyx":240 * @cython.nonecheck(False) * @cython.cdivision(True) * def _cost_function(np.ndarray[float64, ndim=3] sector_displacement, # <<<<<<<<<<<<<< * np.ndarray[float64, ndim=2] template_image, * np.ndarray[float64, ndim=2] input_image, */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_7pysteps_6motion_4_vet_2_cost_function(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_sector_displacement, PyArrayObject 
*__pyx_v_template_image, PyArrayObject *__pyx_v_input_image, PyArrayObject *__pyx_v_mask, float __pyx_v_smooth_gain, int __pyx_v_gradient) { __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x_sectors; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y_sectors; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x_image_size; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y_image_size; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x_sector_size; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y_sector_size; PyArrayObject *__pyx_v_displacement = 0; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_i; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_j; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_xy; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_l; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_m; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_l0; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_m0; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_l1; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_m1; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_i_shift; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_j_shift; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_axis; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; PyArrayObject *__pyx_v_x_guess = 0; PyArrayObject *__pyx_v_y_guess = 0; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_sector_area; PyArrayObject *__pyx_v_interp_coef = 0; PyArrayObject *__pyx_v_l_i = 0; PyArrayObject *__pyx_v_m_j = 0; PyArrayObject *__pyx_v_i_min = 0; PyArrayObject *__pyx_v_i_max = 0; PyArrayObject *__pyx_v_j_min = 0; PyArrayObject *__pyx_v_j_max = 0; PyObject *__pyx_v_counts = NULL; PyArrayObject *__pyx_v_morphed_image = 0; PyArrayObject *__pyx_v_morph_mask = 0; PyArrayObject *__pyx_v__gradient_data = 0; PyArrayObject *__pyx_v_grad_residuals = 0; PyArrayObject *__pyx_v_grad_smooth = 0; PyArrayObject *__pyx_v_buffer = 0; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_residuals; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_smoothness_penalty; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_df_dx2; 
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_df_dxdy; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_df_dy2; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_inloop_smoothness_penalty; __Pyx_LocalBuf_ND __pyx_pybuffernd__gradient_data; __Pyx_Buffer __pyx_pybuffer__gradient_data; __Pyx_LocalBuf_ND __pyx_pybuffernd_buffer; __Pyx_Buffer __pyx_pybuffer_buffer; __Pyx_LocalBuf_ND __pyx_pybuffernd_displacement; __Pyx_Buffer __pyx_pybuffer_displacement; __Pyx_LocalBuf_ND __pyx_pybuffernd_grad_residuals; __Pyx_Buffer __pyx_pybuffer_grad_residuals; __Pyx_LocalBuf_ND __pyx_pybuffernd_grad_smooth; __Pyx_Buffer __pyx_pybuffer_grad_smooth; __Pyx_LocalBuf_ND __pyx_pybuffernd_i_max; __Pyx_Buffer __pyx_pybuffer_i_max; __Pyx_LocalBuf_ND __pyx_pybuffernd_i_min; __Pyx_Buffer __pyx_pybuffer_i_min; __Pyx_LocalBuf_ND __pyx_pybuffernd_input_image; __Pyx_Buffer __pyx_pybuffer_input_image; __Pyx_LocalBuf_ND __pyx_pybuffernd_interp_coef; __Pyx_Buffer __pyx_pybuffer_interp_coef; __Pyx_LocalBuf_ND __pyx_pybuffernd_j_max; __Pyx_Buffer __pyx_pybuffer_j_max; __Pyx_LocalBuf_ND __pyx_pybuffernd_j_min; __Pyx_Buffer __pyx_pybuffer_j_min; __Pyx_LocalBuf_ND __pyx_pybuffernd_l_i; __Pyx_Buffer __pyx_pybuffer_l_i; __Pyx_LocalBuf_ND __pyx_pybuffernd_m_j; __Pyx_Buffer __pyx_pybuffer_m_j; __Pyx_LocalBuf_ND __pyx_pybuffernd_mask; __Pyx_Buffer __pyx_pybuffer_mask; __Pyx_LocalBuf_ND __pyx_pybuffernd_morph_mask; __Pyx_Buffer __pyx_pybuffer_morph_mask; __Pyx_LocalBuf_ND __pyx_pybuffernd_morphed_image; __Pyx_Buffer __pyx_pybuffer_morphed_image; __Pyx_LocalBuf_ND __pyx_pybuffernd_sector_displacement; __Pyx_Buffer __pyx_pybuffer_sector_displacement; __Pyx_LocalBuf_ND __pyx_pybuffernd_template_image; __Pyx_Buffer __pyx_pybuffer_template_image; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_x_guess; __Pyx_Buffer __pyx_pybuffer_x_guess; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_y_guess; __Pyx_Buffer 
__pyx_pybuffer_y_guess; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyArrayObject *__pyx_t_7 = NULL; PyArrayObject *__pyx_t_8 = NULL; int __pyx_t_9; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; PyArrayObject *__pyx_t_13 = NULL; PyArrayObject *__pyx_t_14 = NULL; PyArrayObject *__pyx_t_15 = NULL; PyArrayObject *__pyx_t_16 = NULL; PyArrayObject *__pyx_t_17 = NULL; PyArrayObject *__pyx_t_18 = NULL; PyArrayObject *__pyx_t_19 = NULL; PyArrayObject *__pyx_t_20 = NULL; PyArrayObject *__pyx_t_21 = NULL; PyArrayObject *__pyx_t_22 = NULL; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_23; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_24; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_25; Py_ssize_t __pyx_t_26; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_27; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_28; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_29; Py_ssize_t __pyx_t_30; Py_ssize_t __pyx_t_31; Py_ssize_t __pyx_t_32; Py_ssize_t __pyx_t_33; Py_ssize_t __pyx_t_34; Py_ssize_t __pyx_t_35; Py_ssize_t __pyx_t_36; Py_ssize_t __pyx_t_37; Py_ssize_t __pyx_t_38; Py_ssize_t __pyx_t_39; Py_ssize_t __pyx_t_40; Py_ssize_t __pyx_t_41; Py_ssize_t __pyx_t_42; Py_ssize_t __pyx_t_43; Py_ssize_t __pyx_t_44; Py_ssize_t __pyx_t_45; Py_ssize_t __pyx_t_46; Py_ssize_t __pyx_t_47; Py_ssize_t __pyx_t_48; Py_ssize_t __pyx_t_49; Py_ssize_t __pyx_t_50; Py_ssize_t __pyx_t_51; Py_ssize_t __pyx_t_52; Py_ssize_t __pyx_t_53; Py_ssize_t __pyx_t_54; Py_ssize_t __pyx_t_55; Py_ssize_t __pyx_t_56; Py_ssize_t __pyx_t_57; Py_ssize_t __pyx_t_58; Py_ssize_t __pyx_t_59; Py_ssize_t __pyx_t_60; Py_ssize_t __pyx_t_61; Py_ssize_t __pyx_t_62; Py_ssize_t __pyx_t_63; Py_ssize_t __pyx_t_64; Py_ssize_t __pyx_t_65; Py_ssize_t __pyx_t_66; Py_ssize_t __pyx_t_67; Py_ssize_t __pyx_t_68; Py_ssize_t __pyx_t_69; Py_ssize_t __pyx_t_70; 
Py_ssize_t __pyx_t_71; Py_ssize_t __pyx_t_72; Py_ssize_t __pyx_t_73; Py_ssize_t __pyx_t_74; Py_ssize_t __pyx_t_75; Py_ssize_t __pyx_t_76; Py_ssize_t __pyx_t_77; Py_ssize_t __pyx_t_78; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_79; Py_ssize_t __pyx_t_80; Py_ssize_t __pyx_t_81; Py_ssize_t __pyx_t_82; Py_ssize_t __pyx_t_83; Py_ssize_t __pyx_t_84; Py_ssize_t __pyx_t_85; Py_ssize_t __pyx_t_86; Py_ssize_t __pyx_t_87; Py_ssize_t __pyx_t_88; Py_ssize_t __pyx_t_89; Py_ssize_t __pyx_t_90; Py_ssize_t __pyx_t_91; Py_ssize_t __pyx_t_92; Py_ssize_t __pyx_t_93; Py_ssize_t __pyx_t_94; Py_ssize_t __pyx_t_95; Py_ssize_t __pyx_t_96; Py_ssize_t __pyx_t_97; Py_ssize_t __pyx_t_98; Py_ssize_t __pyx_t_99; Py_ssize_t __pyx_t_100; Py_ssize_t __pyx_t_101; Py_ssize_t __pyx_t_102; Py_ssize_t __pyx_t_103; Py_ssize_t __pyx_t_104; Py_ssize_t __pyx_t_105; Py_ssize_t __pyx_t_106; Py_ssize_t __pyx_t_107; PyObject *(*__pyx_t_108)(PyObject *); PyObject *__pyx_t_109 = NULL; PyObject *(*__pyx_t_110)(PyObject *); Py_ssize_t __pyx_t_111; Py_ssize_t __pyx_t_112; Py_ssize_t __pyx_t_113; Py_ssize_t __pyx_t_114; PyArrayObject *__pyx_t_115 = NULL; PyArrayObject *__pyx_t_116 = NULL; PyArrayObject *__pyx_t_117 = NULL; PyArrayObject *__pyx_t_118 = NULL; PyArrayObject *__pyx_t_119 = NULL; PyArrayObject *__pyx_t_120 = NULL; PyObject *__pyx_t_121 = NULL; Py_ssize_t __pyx_t_122; Py_ssize_t __pyx_t_123; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_124; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_125; Py_ssize_t __pyx_t_126; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_127; Py_ssize_t __pyx_t_128; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_129; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_130; Py_ssize_t __pyx_t_131; Py_ssize_t __pyx_t_132; Py_ssize_t __pyx_t_133; Py_ssize_t __pyx_t_134; Py_ssize_t __pyx_t_135; Py_ssize_t __pyx_t_136; Py_ssize_t __pyx_t_137; Py_ssize_t __pyx_t_138; Py_ssize_t __pyx_t_139; Py_ssize_t __pyx_t_140; Py_ssize_t __pyx_t_141; Py_ssize_t __pyx_t_142; Py_ssize_t __pyx_t_143; Py_ssize_t 
__pyx_t_144; Py_ssize_t __pyx_t_145; Py_ssize_t __pyx_t_146; Py_ssize_t __pyx_t_147; Py_ssize_t __pyx_t_148; Py_ssize_t __pyx_t_149; Py_ssize_t __pyx_t_150; Py_ssize_t __pyx_t_151; Py_ssize_t __pyx_t_152; Py_ssize_t __pyx_t_153; Py_ssize_t __pyx_t_154; Py_ssize_t __pyx_t_155; Py_ssize_t __pyx_t_156; Py_ssize_t __pyx_t_157; Py_ssize_t __pyx_t_158; Py_ssize_t __pyx_t_159; Py_ssize_t __pyx_t_160; Py_ssize_t __pyx_t_161; Py_ssize_t __pyx_t_162; Py_ssize_t __pyx_t_163; Py_ssize_t __pyx_t_164; Py_ssize_t __pyx_t_165; Py_ssize_t __pyx_t_166; Py_ssize_t __pyx_t_167; Py_ssize_t __pyx_t_168; Py_ssize_t __pyx_t_169; Py_ssize_t __pyx_t_170; Py_ssize_t __pyx_t_171; Py_ssize_t __pyx_t_172; Py_ssize_t __pyx_t_173; Py_ssize_t __pyx_t_174; Py_ssize_t __pyx_t_175; Py_ssize_t __pyx_t_176; Py_ssize_t __pyx_t_177; Py_ssize_t __pyx_t_178; Py_ssize_t __pyx_t_179; Py_ssize_t __pyx_t_180; Py_ssize_t __pyx_t_181; Py_ssize_t __pyx_t_182; Py_ssize_t __pyx_t_183; Py_ssize_t __pyx_t_184; Py_ssize_t __pyx_t_185; Py_ssize_t __pyx_t_186; Py_ssize_t __pyx_t_187; Py_ssize_t __pyx_t_188; Py_ssize_t __pyx_t_189; Py_ssize_t __pyx_t_190; Py_ssize_t __pyx_t_191; Py_ssize_t __pyx_t_192; Py_ssize_t __pyx_t_193; Py_ssize_t __pyx_t_194; Py_ssize_t __pyx_t_195; Py_ssize_t __pyx_t_196; Py_ssize_t __pyx_t_197; Py_ssize_t __pyx_t_198; Py_ssize_t __pyx_t_199; Py_ssize_t __pyx_t_200; Py_ssize_t __pyx_t_201; Py_ssize_t __pyx_t_202; Py_ssize_t __pyx_t_203; Py_ssize_t __pyx_t_204; Py_ssize_t __pyx_t_205; Py_ssize_t __pyx_t_206; Py_ssize_t __pyx_t_207; Py_ssize_t __pyx_t_208; Py_ssize_t __pyx_t_209; Py_ssize_t __pyx_t_210; Py_ssize_t __pyx_t_211; Py_ssize_t __pyx_t_212; Py_ssize_t __pyx_t_213; Py_ssize_t __pyx_t_214; Py_ssize_t __pyx_t_215; Py_ssize_t __pyx_t_216; Py_ssize_t __pyx_t_217; Py_ssize_t __pyx_t_218; Py_ssize_t __pyx_t_219; Py_ssize_t __pyx_t_220; Py_ssize_t __pyx_t_221; Py_ssize_t __pyx_t_222; Py_ssize_t __pyx_t_223; Py_ssize_t __pyx_t_224; Py_ssize_t __pyx_t_225; Py_ssize_t __pyx_t_226; 
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_t_227; long __pyx_t_228; long __pyx_t_229; long __pyx_t_230; long __pyx_t_231; Py_ssize_t __pyx_t_232; Py_ssize_t __pyx_t_233; Py_ssize_t __pyx_t_234; Py_ssize_t __pyx_t_235; Py_ssize_t __pyx_t_236; Py_ssize_t __pyx_t_237; Py_ssize_t __pyx_t_238; Py_ssize_t __pyx_t_239; Py_ssize_t __pyx_t_240; Py_ssize_t __pyx_t_241; Py_ssize_t __pyx_t_242; Py_ssize_t __pyx_t_243; Py_ssize_t __pyx_t_244; Py_ssize_t __pyx_t_245; Py_ssize_t __pyx_t_246; Py_ssize_t __pyx_t_247; Py_ssize_t __pyx_t_248; Py_ssize_t __pyx_t_249; Py_ssize_t __pyx_t_250; Py_ssize_t __pyx_t_251; Py_ssize_t __pyx_t_252; Py_ssize_t __pyx_t_253; Py_ssize_t __pyx_t_254; Py_ssize_t __pyx_t_255; Py_ssize_t __pyx_t_256; Py_ssize_t __pyx_t_257; Py_ssize_t __pyx_t_258; Py_ssize_t __pyx_t_259; Py_ssize_t __pyx_t_260; Py_ssize_t __pyx_t_261; Py_ssize_t __pyx_t_262; Py_ssize_t __pyx_t_263; Py_ssize_t __pyx_t_264; Py_ssize_t __pyx_t_265; Py_ssize_t __pyx_t_266; Py_ssize_t __pyx_t_267; Py_ssize_t __pyx_t_268; Py_ssize_t __pyx_t_269; Py_ssize_t __pyx_t_270; Py_ssize_t __pyx_t_271; Py_ssize_t __pyx_t_272; Py_ssize_t __pyx_t_273; Py_ssize_t __pyx_t_274; Py_ssize_t __pyx_t_275; Py_ssize_t __pyx_t_276; Py_ssize_t __pyx_t_277; Py_ssize_t __pyx_t_278; Py_ssize_t __pyx_t_279; Py_ssize_t __pyx_t_280; Py_ssize_t __pyx_t_281; Py_ssize_t __pyx_t_282; Py_ssize_t __pyx_t_283; Py_ssize_t __pyx_t_284; Py_ssize_t __pyx_t_285; Py_ssize_t __pyx_t_286; Py_ssize_t __pyx_t_287; Py_ssize_t __pyx_t_288; Py_ssize_t __pyx_t_289; Py_ssize_t __pyx_t_290; Py_ssize_t __pyx_t_291; __Pyx_RefNannySetupContext("_cost_function", 0); __pyx_pybuffer_displacement.pybuffer.buf = NULL; __pyx_pybuffer_displacement.refcount = 0; __pyx_pybuffernd_displacement.data = NULL; __pyx_pybuffernd_displacement.rcbuffer = &__pyx_pybuffer_displacement; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = 
NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_x_guess.pybuffer.buf = NULL; __pyx_pybuffer_x_guess.refcount = 0; __pyx_pybuffernd_x_guess.data = NULL; __pyx_pybuffernd_x_guess.rcbuffer = &__pyx_pybuffer_x_guess; __pyx_pybuffer_y_guess.pybuffer.buf = NULL; __pyx_pybuffer_y_guess.refcount = 0; __pyx_pybuffernd_y_guess.data = NULL; __pyx_pybuffernd_y_guess.rcbuffer = &__pyx_pybuffer_y_guess; __pyx_pybuffer_interp_coef.pybuffer.buf = NULL; __pyx_pybuffer_interp_coef.refcount = 0; __pyx_pybuffernd_interp_coef.data = NULL; __pyx_pybuffernd_interp_coef.rcbuffer = &__pyx_pybuffer_interp_coef; __pyx_pybuffer_l_i.pybuffer.buf = NULL; __pyx_pybuffer_l_i.refcount = 0; __pyx_pybuffernd_l_i.data = NULL; __pyx_pybuffernd_l_i.rcbuffer = &__pyx_pybuffer_l_i; __pyx_pybuffer_m_j.pybuffer.buf = NULL; __pyx_pybuffer_m_j.refcount = 0; __pyx_pybuffernd_m_j.data = NULL; __pyx_pybuffernd_m_j.rcbuffer = &__pyx_pybuffer_m_j; __pyx_pybuffer_i_min.pybuffer.buf = NULL; __pyx_pybuffer_i_min.refcount = 0; __pyx_pybuffernd_i_min.data = NULL; __pyx_pybuffernd_i_min.rcbuffer = &__pyx_pybuffer_i_min; __pyx_pybuffer_i_max.pybuffer.buf = NULL; __pyx_pybuffer_i_max.refcount = 0; __pyx_pybuffernd_i_max.data = NULL; __pyx_pybuffernd_i_max.rcbuffer = &__pyx_pybuffer_i_max; __pyx_pybuffer_j_min.pybuffer.buf = NULL; __pyx_pybuffer_j_min.refcount = 0; __pyx_pybuffernd_j_min.data = NULL; __pyx_pybuffernd_j_min.rcbuffer = &__pyx_pybuffer_j_min; __pyx_pybuffer_j_max.pybuffer.buf = NULL; __pyx_pybuffer_j_max.refcount = 0; __pyx_pybuffernd_j_max.data = NULL; __pyx_pybuffernd_j_max.rcbuffer = &__pyx_pybuffer_j_max; __pyx_pybuffer_morphed_image.pybuffer.buf = NULL; __pyx_pybuffer_morphed_image.refcount = 0; __pyx_pybuffernd_morphed_image.data = NULL; __pyx_pybuffernd_morphed_image.rcbuffer = &__pyx_pybuffer_morphed_image; __pyx_pybuffer_morph_mask.pybuffer.buf = NULL; __pyx_pybuffer_morph_mask.refcount = 0; 
__pyx_pybuffernd_morph_mask.data = NULL; __pyx_pybuffernd_morph_mask.rcbuffer = &__pyx_pybuffer_morph_mask; __pyx_pybuffer__gradient_data.pybuffer.buf = NULL; __pyx_pybuffer__gradient_data.refcount = 0; __pyx_pybuffernd__gradient_data.data = NULL; __pyx_pybuffernd__gradient_data.rcbuffer = &__pyx_pybuffer__gradient_data; __pyx_pybuffer_grad_residuals.pybuffer.buf = NULL; __pyx_pybuffer_grad_residuals.refcount = 0; __pyx_pybuffernd_grad_residuals.data = NULL; __pyx_pybuffernd_grad_residuals.rcbuffer = &__pyx_pybuffer_grad_residuals; __pyx_pybuffer_grad_smooth.pybuffer.buf = NULL; __pyx_pybuffer_grad_smooth.refcount = 0; __pyx_pybuffernd_grad_smooth.data = NULL; __pyx_pybuffernd_grad_smooth.rcbuffer = &__pyx_pybuffer_grad_smooth; __pyx_pybuffer_buffer.pybuffer.buf = NULL; __pyx_pybuffer_buffer.refcount = 0; __pyx_pybuffernd_buffer.data = NULL; __pyx_pybuffernd_buffer.rcbuffer = &__pyx_pybuffer_buffer; __pyx_pybuffer_sector_displacement.pybuffer.buf = NULL; __pyx_pybuffer_sector_displacement.refcount = 0; __pyx_pybuffernd_sector_displacement.data = NULL; __pyx_pybuffernd_sector_displacement.rcbuffer = &__pyx_pybuffer_sector_displacement; __pyx_pybuffer_template_image.pybuffer.buf = NULL; __pyx_pybuffer_template_image.refcount = 0; __pyx_pybuffernd_template_image.data = NULL; __pyx_pybuffernd_template_image.rcbuffer = &__pyx_pybuffer_template_image; __pyx_pybuffer_input_image.pybuffer.buf = NULL; __pyx_pybuffer_input_image.refcount = 0; __pyx_pybuffernd_input_image.data = NULL; __pyx_pybuffernd_input_image.rcbuffer = &__pyx_pybuffer_input_image; __pyx_pybuffer_mask.pybuffer.buf = NULL; __pyx_pybuffer_mask.refcount = 0; __pyx_pybuffernd_mask.data = NULL; __pyx_pybuffernd_mask.rcbuffer = &__pyx_pybuffer_mask; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer, (PyObject*)__pyx_v_sector_displacement, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 
3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 240, __pyx_L1_error) } __pyx_pybuffernd_sector_displacement.diminfo[0].strides = __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_sector_displacement.diminfo[0].shape = __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_sector_displacement.diminfo[1].strides = __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_sector_displacement.diminfo[1].shape = __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_sector_displacement.diminfo[2].strides = __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_sector_displacement.diminfo[2].shape = __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.shape[2]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_template_image.rcbuffer->pybuffer, (PyObject*)__pyx_v_template_image, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 240, __pyx_L1_error) } __pyx_pybuffernd_template_image.diminfo[0].strides = __pyx_pybuffernd_template_image.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_template_image.diminfo[0].shape = __pyx_pybuffernd_template_image.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_template_image.diminfo[1].strides = __pyx_pybuffernd_template_image.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_template_image.diminfo[1].shape = __pyx_pybuffernd_template_image.rcbuffer->pybuffer.shape[1]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_input_image.rcbuffer->pybuffer, (PyObject*)__pyx_v_input_image, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 240, __pyx_L1_error) } __pyx_pybuffernd_input_image.diminfo[0].strides = 
__pyx_pybuffernd_input_image.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_input_image.diminfo[0].shape = __pyx_pybuffernd_input_image.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_input_image.diminfo[1].strides = __pyx_pybuffernd_input_image.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_input_image.diminfo[1].shape = __pyx_pybuffernd_input_image.rcbuffer->pybuffer.shape[1]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_mask.rcbuffer->pybuffer, (PyObject*)__pyx_v_mask, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 240, __pyx_L1_error) } __pyx_pybuffernd_mask.diminfo[0].strides = __pyx_pybuffernd_mask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_mask.diminfo[0].shape = __pyx_pybuffernd_mask.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_mask.diminfo[1].strides = __pyx_pybuffernd_mask.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_mask.diminfo[1].shape = __pyx_pybuffernd_mask.rcbuffer->pybuffer.shape[1]; /* "pysteps/motion/_vet.pyx":350 * """ * * cdef intp x_sectors = <intp> sector_displacement.shape[1] # <<<<<<<<<<<<<< * cdef intp y_sectors = <intp> sector_displacement.shape[2] * */ __pyx_v_x_sectors = ((__pyx_t_7pysteps_6motion_4_vet_intp)(__pyx_v_sector_displacement->dimensions[1])); /* "pysteps/motion/_vet.pyx":351 * * cdef intp x_sectors = <intp> sector_displacement.shape[1] * cdef intp y_sectors = <intp> sector_displacement.shape[2] # <<<<<<<<<<<<<< * * cdef intp x_image_size = <intp> template_image.shape[0] */ __pyx_v_y_sectors = ((__pyx_t_7pysteps_6motion_4_vet_intp)(__pyx_v_sector_displacement->dimensions[2])); /* "pysteps/motion/_vet.pyx":353 * cdef intp y_sectors = <intp> sector_displacement.shape[2] * * cdef intp x_image_size = <intp> template_image.shape[0] # <<<<<<<<<<<<<< * cdef intp y_image_size = <intp> template_image.shape[1] * */ __pyx_v_x_image_size = 
((__pyx_t_7pysteps_6motion_4_vet_intp)(__pyx_v_template_image->dimensions[0])); /* "pysteps/motion/_vet.pyx":354 * * cdef intp x_image_size = <intp> template_image.shape[0] * cdef intp y_image_size = <intp> template_image.shape[1] # <<<<<<<<<<<<<< * * if x_image_size % x_sectors != 0: */ __pyx_v_y_image_size = ((__pyx_t_7pysteps_6motion_4_vet_intp)(__pyx_v_template_image->dimensions[1])); /* "pysteps/motion/_vet.pyx":356 * cdef intp y_image_size = <intp> template_image.shape[1] * * if x_image_size % x_sectors != 0: # <<<<<<<<<<<<<< * raise ValueError("Error computing cost function.\n", * "The number of sectors in x axis (axis=0)" */ __pyx_t_1 = (((__pyx_v_x_image_size % __pyx_v_x_sectors) != 0) != 0); if (unlikely(__pyx_t_1)) { /* "pysteps/motion/_vet.pyx":357 * * if x_image_size % x_sectors != 0: * raise ValueError("Error computing cost function.\n", # <<<<<<<<<<<<<< * "The number of sectors in x axis (axis=0)" * + " don't divide the image size") */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 357, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 357, __pyx_L1_error) /* "pysteps/motion/_vet.pyx":356 * cdef intp y_image_size = <intp> template_image.shape[1] * * if x_image_size % x_sectors != 0: # <<<<<<<<<<<<<< * raise ValueError("Error computing cost function.\n", * "The number of sectors in x axis (axis=0)" */ } /* "pysteps/motion/_vet.pyx":361 * + " don't divide the image size") * * if y_image_size % y_sectors != 0: # <<<<<<<<<<<<<< * raise ValueError("Error computing cost function.\n", * "The number of sectors in y axis (axis=1) don't" */ __pyx_t_1 = (((__pyx_v_y_image_size % __pyx_v_y_sectors) != 0) != 0); if (unlikely(__pyx_t_1)) { /* "pysteps/motion/_vet.pyx":362 * * if y_image_size % y_sectors != 0: * raise ValueError("Error computing cost function.\n", # <<<<<<<<<<<<<< * "The number of sectors in y axis 
(axis=1) don't" * + " divide the image size") */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 362, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 362, __pyx_L1_error) /* "pysteps/motion/_vet.pyx":361 * + " don't divide the image size") * * if y_image_size % y_sectors != 0: # <<<<<<<<<<<<<< * raise ValueError("Error computing cost function.\n", * "The number of sectors in y axis (axis=1) don't" */ } /* "pysteps/motion/_vet.pyx":367 * * cdef intp x_sector_size = ( * <intp> (round(x_image_size / x_sectors))) # <<<<<<<<<<<<<< * * cdef intp y_sector_size = ( */ __pyx_v_x_sector_size = ((__pyx_t_7pysteps_6motion_4_vet_intp)round((__pyx_v_x_image_size / __pyx_v_x_sectors))); /* "pysteps/motion/_vet.pyx":370 * * cdef intp y_sector_size = ( * <intp> (round(y_image_size / y_sectors))) # <<<<<<<<<<<<<< * * cdef np.ndarray[float64, ndim = 3] displacement = ( */ __pyx_v_y_sector_size = ((__pyx_t_7pysteps_6motion_4_vet_intp)round((__pyx_v_y_image_size / __pyx_v_y_sectors))); /* "pysteps/motion/_vet.pyx":373 * * cdef np.ndarray[float64, ndim = 3] displacement = ( * np.zeros([2, x_image_size, y_image_size], dtype=np.float64)) # <<<<<<<<<<<<<< * * cdef intp i, j, xy, l, m, ll, mm, i_sec, j_sec */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 
PyList_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_int_2); __Pyx_GIVEREF(__pyx_int_2); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_int_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); PyList_SET_ITEM(__pyx_t_5, 2, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_float64); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 373, __pyx_L1_error) __pyx_t_7 = ((PyArrayObject *)__pyx_t_6); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_displacement.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) { __pyx_v_displacement = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); 
__pyx_pybuffernd_displacement.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 372, __pyx_L1_error) } else {__pyx_pybuffernd_displacement.diminfo[0].strides = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_displacement.diminfo[0].shape = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_displacement.diminfo[1].strides = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_displacement.diminfo[1].shape = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_displacement.diminfo[2].strides = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_displacement.diminfo[2].shape = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.shape[2]; } } __pyx_t_7 = 0; __pyx_v_displacement = ((PyArrayObject *)__pyx_t_6); __pyx_t_6 = 0; /* "pysteps/motion/_vet.pyx":378 * cdef intp l0, m0, l1, m1, i_shift, j_shift, axis * * i_shift = (x_sector_size // 2) # <<<<<<<<<<<<<< * j_shift = (y_sector_size // 2) * */ __pyx_v_i_shift = (__pyx_v_x_sector_size / 2); /* "pysteps/motion/_vet.pyx":379 * * i_shift = (x_sector_size // 2) * j_shift = (y_sector_size // 2) # <<<<<<<<<<<<<< * * #Assume regular grid with constant grid spacing. 
*/ __pyx_v_j_shift = (__pyx_v_y_sector_size / 2); /* "pysteps/motion/_vet.pyx":385 * cdef np.ndarray[float64, ndim = 1] x * cdef np.ndarray[float64, ndim = 1] y * x = np.arange(x_image_size, dtype='float64') # <<<<<<<<<<<<<< * y = np.arange(y_image_size, dtype='float64') * */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_arange); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_dtype, __pyx_n_u_float64) < 0) __PYX_ERR(0, 385, __pyx_L1_error) __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 385, __pyx_L1_error) __pyx_t_8 = ((PyArrayObject *)__pyx_t_3); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { 
PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0; } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 385, __pyx_L1_error) } __pyx_t_8 = 0; __pyx_v_x = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "pysteps/motion/_vet.pyx":386 * cdef np.ndarray[float64, ndim = 1] y * x = np.arange(x_image_size, dtype='float64') * y = np.arange(y_image_size, dtype='float64') # <<<<<<<<<<<<<< * * cdef np.ndarray[float64, ndim = 1] x_guess */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 386, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_arange); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 386, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 386, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 386, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 386, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_n_u_float64) < 0) __PYX_ERR(0, 386, __pyx_L1_error) __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_4, 
__pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 386, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 386, __pyx_L1_error) __pyx_t_13 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_13, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10); } __pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0; } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 386, __pyx_L1_error) } __pyx_t_13 = 0; __pyx_v_y = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "pysteps/motion/_vet.pyx":391 * cdef np.ndarray[float64, ndim = 1] y_guess * * x_guess = x.reshape((x_sectors, x_sector_size)).mean(axis=1) # <<<<<<<<<<<<<< * y_guess = y.reshape((y_sectors, y_sector_size)).mean(axis=1) * */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_x), __pyx_n_s_reshape); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = 
__Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_sectors); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_sector_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_6); __pyx_t_4 = 0; __pyx_t_6 = 0; __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_5 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_6, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_mean); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_axis, __pyx_int_1) < 0) __PYX_ERR(0, 391, __pyx_L1_error) __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_empty_tuple, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 391, __pyx_L1_error) 
__pyx_t_14 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x_guess.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x_guess.rcbuffer->pybuffer, (PyObject*)__pyx_t_14, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x_guess.rcbuffer->pybuffer, (PyObject*)__pyx_v_x_guess, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0; } __pyx_pybuffernd_x_guess.diminfo[0].strides = __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x_guess.diminfo[0].shape = __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 391, __pyx_L1_error) } __pyx_t_14 = 0; __pyx_v_x_guess = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "pysteps/motion/_vet.pyx":392 * * x_guess = x.reshape((x_sectors, x_sector_size)).mean(axis=1) * y_guess = y.reshape((y_sectors, y_sector_size)).mean(axis=1) # <<<<<<<<<<<<<< * * cdef float64 sector_area */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_y), __pyx_n_s_reshape); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_sectors); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_sector_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 392, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_6); __pyx_t_3 = 0; __pyx_t_6 = 0; __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_2 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_mean); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_axis, __pyx_int_1) < 0) __PYX_ERR(0, 392, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 392, __pyx_L1_error) __pyx_t_15 = ((PyArrayObject *)__pyx_t_4); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y_guess.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y_guess.rcbuffer->pybuffer, (PyObject*)__pyx_t_15, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, 
__pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y_guess.rcbuffer->pybuffer, (PyObject*)__pyx_v_y_guess, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10); } __pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0; } __pyx_pybuffernd_y_guess.diminfo[0].strides = __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y_guess.diminfo[0].shape = __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 392, __pyx_L1_error) } __pyx_t_15 = 0; __pyx_v_y_guess = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0; /* "pysteps/motion/_vet.pyx":398 * cdef np.ndarray[float64, ndim = 3] interp_coef * * interp_coef = np.zeros([4, x_image_size, y_image_size], dtype=np.float64) # <<<<<<<<<<<<<< * * cdef np.ndarray[intp, ndim = 1] l_i = np.zeros(x_image_size, dtype=np.intp) */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_int_4); __Pyx_GIVEREF(__pyx_int_4); PyList_SET_ITEM(__pyx_t_6, 0, __pyx_int_4); 
__Pyx_GIVEREF(__pyx_t_4); PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_5); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_float64); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_dtype, __pyx_t_3) < 0) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 398, __pyx_L1_error) __pyx_t_16 = ((PyArrayObject *)__pyx_t_3); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_interp_coef.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_interp_coef.rcbuffer->pybuffer, (PyObject*)__pyx_t_16, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_interp_coef.rcbuffer->pybuffer, (PyObject*)__pyx_v_interp_coef, 
&__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0; } __pyx_pybuffernd_interp_coef.diminfo[0].strides = __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_interp_coef.diminfo[0].shape = __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_interp_coef.diminfo[1].strides = __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_interp_coef.diminfo[1].shape = __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_interp_coef.diminfo[2].strides = __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_interp_coef.diminfo[2].shape = __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.shape[2]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 398, __pyx_L1_error) } __pyx_t_16 = 0; __pyx_v_interp_coef = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "pysteps/motion/_vet.pyx":400 * interp_coef = np.zeros([4, x_image_size, y_image_size], dtype=np.float64) * * cdef np.ndarray[intp, ndim = 1] l_i = np.zeros(x_image_size, dtype=np.intp) # <<<<<<<<<<<<<< * cdef np.ndarray[intp, ndim = 1] m_j = np.zeros(y_image_size, dtype=np.intp) * */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_zeros); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 400, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_intp); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_4) < 0) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 400, __pyx_L1_error) __pyx_t_17 = ((PyArrayObject *)__pyx_t_4); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_l_i.rcbuffer->pybuffer, (PyObject*)__pyx_t_17, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_v_l_i = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_l_i.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 400, __pyx_L1_error) } else {__pyx_pybuffernd_l_i.diminfo[0].strides = __pyx_pybuffernd_l_i.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_l_i.diminfo[0].shape = __pyx_pybuffernd_l_i.rcbuffer->pybuffer.shape[0]; } } __pyx_t_17 = 0; __pyx_v_l_i = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0; /* "pysteps/motion/_vet.pyx":401 * * cdef np.ndarray[intp, ndim = 1] l_i = np.zeros(x_image_size, dtype=np.intp) * cdef np.ndarray[intp, ndim = 1] 
m_j = np.zeros(y_image_size, dtype=np.intp) # <<<<<<<<<<<<<< * * cdef np.ndarray[intp, ndim = 1] i_min = np.full(x_sectors, */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_intp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_2) < 0) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 401, __pyx_L1_error) __pyx_t_18 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_m_j.rcbuffer->pybuffer, (PyObject*)__pyx_t_18, 
&__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_v_m_j = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_m_j.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 401, __pyx_L1_error) } else {__pyx_pybuffernd_m_j.diminfo[0].strides = __pyx_pybuffernd_m_j.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_m_j.diminfo[0].shape = __pyx_pybuffernd_m_j.rcbuffer->pybuffer.shape[0]; } } __pyx_t_18 = 0; __pyx_v_m_j = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "pysteps/motion/_vet.pyx":403 * cdef np.ndarray[intp, ndim = 1] m_j = np.zeros(y_image_size, dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] i_min = np.full(x_sectors, # <<<<<<<<<<<<<< * x_image_size, * dtype=np.intp) */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_full); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_sectors); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "pysteps/motion/_vet.pyx":404 * * cdef np.ndarray[intp, ndim = 1] i_min = np.full(x_sectors, * x_image_size, # <<<<<<<<<<<<<< * dtype=np.intp) * */ __pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 404, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); /* "pysteps/motion/_vet.pyx":403 * cdef np.ndarray[intp, ndim = 1] m_j = np.zeros(y_image_size, dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] i_min = np.full(x_sectors, # <<<<<<<<<<<<<< * x_image_size, * dtype=np.intp) */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_5); 
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5); __pyx_t_2 = 0; __pyx_t_5 = 0; /* "pysteps/motion/_vet.pyx":405 * cdef np.ndarray[intp, ndim = 1] i_min = np.full(x_sectors, * x_image_size, * dtype=np.intp) # <<<<<<<<<<<<<< * * cdef np.ndarray[intp, ndim = 1] i_max = np.full(x_sectors, */ __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 405, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 405, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_intp); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 405, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 405, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "pysteps/motion/_vet.pyx":403 * cdef np.ndarray[intp, ndim = 1] m_j = np.zeros(y_image_size, dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] i_min = np.full(x_sectors, # <<<<<<<<<<<<<< * x_image_size, * dtype=np.intp) */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 403, __pyx_L1_error) __pyx_t_19 = ((PyArrayObject *)__pyx_t_6); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_i_min.rcbuffer->pybuffer, (PyObject*)__pyx_t_19, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_v_i_min = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_i_min.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 403, __pyx_L1_error) } else 
{__pyx_pybuffernd_i_min.diminfo[0].strides = __pyx_pybuffernd_i_min.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_i_min.diminfo[0].shape = __pyx_pybuffernd_i_min.rcbuffer->pybuffer.shape[0]; } } __pyx_t_19 = 0; __pyx_v_i_min = ((PyArrayObject *)__pyx_t_6); __pyx_t_6 = 0; /* "pysteps/motion/_vet.pyx":407 * dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] i_max = np.full(x_sectors, # <<<<<<<<<<<<<< * x_image_size, * dtype=np.intp) */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_full); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_sectors); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); /* "pysteps/motion/_vet.pyx":408 * * cdef np.ndarray[intp, ndim = 1] i_max = np.full(x_sectors, * x_image_size, # <<<<<<<<<<<<<< * dtype=np.intp) * */ __pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 408, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "pysteps/motion/_vet.pyx":407 * dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] i_max = np.full(x_sectors, # <<<<<<<<<<<<<< * x_image_size, * dtype=np.intp) */ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __pyx_t_6 = 0; __pyx_t_3 = 0; /* "pysteps/motion/_vet.pyx":409 * cdef np.ndarray[intp, ndim = 1] i_max = np.full(x_sectors, * x_image_size, * dtype=np.intp) # <<<<<<<<<<<<<< * * cdef np.ndarray[intp, ndim = 1] j_min = np.full(y_sectors, */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 409, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 409, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_intp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 409, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_2) < 0) __PYX_ERR(0, 409, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "pysteps/motion/_vet.pyx":407 * dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] i_max = np.full(x_sectors, # <<<<<<<<<<<<<< * x_image_size, * dtype=np.intp) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 407, __pyx_L1_error) __pyx_t_20 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_i_max.rcbuffer->pybuffer, (PyObject*)__pyx_t_20, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_v_i_max = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_i_max.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 407, __pyx_L1_error) } else {__pyx_pybuffernd_i_max.diminfo[0].strides = __pyx_pybuffernd_i_max.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_i_max.diminfo[0].shape = __pyx_pybuffernd_i_max.rcbuffer->pybuffer.shape[0]; } } __pyx_t_20 = 0; __pyx_v_i_max = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "pysteps/motion/_vet.pyx":411 * dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] j_min = np.full(y_sectors, # <<<<<<<<<<<<<< * y_image_size, * dtype=np.intp) */ 
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_full); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_sectors); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "pysteps/motion/_vet.pyx":412 * * cdef np.ndarray[intp, ndim = 1] j_min = np.full(y_sectors, * y_image_size, # <<<<<<<<<<<<<< * dtype=np.intp) * */ __pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 412, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); /* "pysteps/motion/_vet.pyx":411 * dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] j_min = np.full(y_sectors, # <<<<<<<<<<<<<< * y_image_size, * dtype=np.intp) */ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; /* "pysteps/motion/_vet.pyx":413 * cdef np.ndarray[intp, ndim = 1] j_min = np.full(y_sectors, * y_image_size, * dtype=np.intp) # <<<<<<<<<<<<<< * * cdef np.ndarray[intp, ndim = 1] j_max = np.full(y_sectors, */ __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_intp); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 413, __pyx_L1_error) 
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "pysteps/motion/_vet.pyx":411 * dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] j_min = np.full(y_sectors, # <<<<<<<<<<<<<< * y_image_size, * dtype=np.intp) */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 411, __pyx_L1_error) __pyx_t_21 = ((PyArrayObject *)__pyx_t_6); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_j_min.rcbuffer->pybuffer, (PyObject*)__pyx_t_21, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_v_j_min = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_j_min.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 411, __pyx_L1_error) } else {__pyx_pybuffernd_j_min.diminfo[0].strides = __pyx_pybuffernd_j_min.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_j_min.diminfo[0].shape = __pyx_pybuffernd_j_min.rcbuffer->pybuffer.shape[0]; } } __pyx_t_21 = 0; __pyx_v_j_min = ((PyArrayObject *)__pyx_t_6); __pyx_t_6 = 0; /* "pysteps/motion/_vet.pyx":415 * dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] j_max = np.full(y_sectors, # <<<<<<<<<<<<<< * y_image_size, * dtype=np.intp) */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_full); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_sectors); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 415, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_6); /* "pysteps/motion/_vet.pyx":416 * * cdef np.ndarray[intp, ndim = 1] j_max = np.full(y_sectors, * y_image_size, # <<<<<<<<<<<<<< * dtype=np.intp) * */ __pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); /* "pysteps/motion/_vet.pyx":415 * dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] j_max = np.full(y_sectors, # <<<<<<<<<<<<<< * y_image_size, * dtype=np.intp) */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5); __pyx_t_6 = 0; __pyx_t_5 = 0; /* "pysteps/motion/_vet.pyx":417 * cdef np.ndarray[intp, ndim = 1] j_max = np.full(y_sectors, * y_image_size, * dtype=np.intp) # <<<<<<<<<<<<<< * * for i in prange(x_image_size, schedule='dynamic', nogil=True): */ __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_intp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "pysteps/motion/_vet.pyx":415 * dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] j_max = np.full(y_sectors, # <<<<<<<<<<<<<< * y_image_size, * dtype=np.intp) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); 
__pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 415, __pyx_L1_error) __pyx_t_22 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_j_max.rcbuffer->pybuffer, (PyObject*)__pyx_t_22, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_v_j_max = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_j_max.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 415, __pyx_L1_error) } else {__pyx_pybuffernd_j_max.diminfo[0].strides = __pyx_pybuffernd_j_max.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_j_max.diminfo[0].shape = __pyx_pybuffernd_j_max.rcbuffer->pybuffer.shape[0]; } } __pyx_t_22 = 0; __pyx_v_j_max = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "pysteps/motion/_vet.pyx":419 * dtype=np.intp) * * for i in prange(x_image_size, schedule='dynamic', nogil=True): # <<<<<<<<<<<<<< * * l0 = int_min((i - i_shift) // x_sector_size, x_sectors - 2) */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_23 = __pyx_v_x_image_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_25 = (__pyx_t_23 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_25 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_100, __pyx_t_101, __pyx_t_102, __pyx_t_103, __pyx_t_104, __pyx_t_105, __pyx_t_106, __pyx_t_26, __pyx_t_27, __pyx_t_28, __pyx_t_29, __pyx_t_30, __pyx_t_31, __pyx_t_32, __pyx_t_33, __pyx_t_34, __pyx_t_35, __pyx_t_36, __pyx_t_37, __pyx_t_38, __pyx_t_39, __pyx_t_40, __pyx_t_41, __pyx_t_42, __pyx_t_43, __pyx_t_44, __pyx_t_45, __pyx_t_46, __pyx_t_47, __pyx_t_48, __pyx_t_49, __pyx_t_50, 
__pyx_t_51, __pyx_t_52, __pyx_t_53, __pyx_t_54, __pyx_t_55, __pyx_t_56, __pyx_t_57, __pyx_t_58, __pyx_t_59, __pyx_t_60, __pyx_t_61, __pyx_t_62, __pyx_t_63, __pyx_t_64, __pyx_t_65, __pyx_t_66, __pyx_t_67, __pyx_t_68, __pyx_t_69, __pyx_t_70, __pyx_t_71, __pyx_t_72, __pyx_t_73, __pyx_t_74, __pyx_t_75, __pyx_t_76, __pyx_t_77, __pyx_t_78, __pyx_t_79, __pyx_t_80, __pyx_t_81, __pyx_t_82, __pyx_t_83, __pyx_t_84, __pyx_t_85, __pyx_t_86, __pyx_t_87, __pyx_t_88, __pyx_t_89, __pyx_t_90, __pyx_t_91, __pyx_t_92, __pyx_t_93, __pyx_t_94, __pyx_t_95, __pyx_t_96, __pyx_t_97, __pyx_t_98, __pyx_t_99) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_l0) lastprivate(__pyx_v_l1) lastprivate(__pyx_v_m0) lastprivate(__pyx_v_m1) lastprivate(__pyx_v_sector_area) lastprivate(__pyx_v_xy) schedule(dynamic) #endif /* _OPENMP */ for (__pyx_t_24 = 0; __pyx_t_24 < __pyx_t_25; __pyx_t_24++){ { __pyx_v_i = (__pyx_t_7pysteps_6motion_4_vet_intp)(0 + 1 * __pyx_t_24); /* Initialize private variables to invalid values */ __pyx_v_j = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); __pyx_v_l0 = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); __pyx_v_l1 = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); __pyx_v_m0 = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); __pyx_v_m1 = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); __pyx_v_sector_area = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN()); __pyx_v_xy = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); /* "pysteps/motion/_vet.pyx":421 * for i in prange(x_image_size, schedule='dynamic', nogil=True): * * l0 = int_min((i - i_shift) // x_sector_size, x_sectors - 2) # <<<<<<<<<<<<<< * l0 = int_max(l0, 0) * l1 = l0 + 1 */ __pyx_v_l0 = __pyx_f_7pysteps_6motion_4_vet_int_min(((__pyx_v_i - __pyx_v_i_shift) / __pyx_v_x_sector_size), (__pyx_v_x_sectors - 2)); /* "pysteps/motion/_vet.pyx":422 * * l0 = int_min((i - i_shift) // x_sector_size, x_sectors - 
2) * l0 = int_max(l0, 0) # <<<<<<<<<<<<<< * l1 = l0 + 1 * */ __pyx_v_l0 = __pyx_f_7pysteps_6motion_4_vet_int_max(__pyx_v_l0, 0); /* "pysteps/motion/_vet.pyx":423 * l0 = int_min((i - i_shift) // x_sector_size, x_sectors - 2) * l0 = int_max(l0, 0) * l1 = l0 + 1 # <<<<<<<<<<<<<< * * l_i[i] = l0 */ __pyx_v_l1 = (__pyx_v_l0 + 1); /* "pysteps/motion/_vet.pyx":425 * l1 = l0 + 1 * * l_i[i] = l0 # <<<<<<<<<<<<<< * * for j in range(y_image_size): */ __pyx_t_26 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_l_i.rcbuffer->pybuffer.buf, __pyx_t_26, __pyx_pybuffernd_l_i.diminfo[0].strides) = __pyx_v_l0; /* "pysteps/motion/_vet.pyx":427 * l_i[i] = l0 * * for j in range(y_image_size): # <<<<<<<<<<<<<< * m0 = int_min((j - j_shift) // y_sector_size, y_sectors - 2) * m0 = int_max(m0, 0) */ __pyx_t_27 = __pyx_v_y_image_size; __pyx_t_28 = __pyx_t_27; for (__pyx_t_29 = 0; __pyx_t_29 < __pyx_t_28; __pyx_t_29+=1) { __pyx_v_j = __pyx_t_29; /* "pysteps/motion/_vet.pyx":428 * * for j in range(y_image_size): * m0 = int_min((j - j_shift) // y_sector_size, y_sectors - 2) # <<<<<<<<<<<<<< * m0 = int_max(m0, 0) * m1 = m0 + 1 */ __pyx_v_m0 = __pyx_f_7pysteps_6motion_4_vet_int_min(((__pyx_v_j - __pyx_v_j_shift) / __pyx_v_y_sector_size), (__pyx_v_y_sectors - 2)); /* "pysteps/motion/_vet.pyx":429 * for j in range(y_image_size): * m0 = int_min((j - j_shift) // y_sector_size, y_sectors - 2) * m0 = int_max(m0, 0) # <<<<<<<<<<<<<< * m1 = m0 + 1 * */ __pyx_v_m0 = __pyx_f_7pysteps_6motion_4_vet_int_max(__pyx_v_m0, 0); /* "pysteps/motion/_vet.pyx":430 * m0 = int_min((j - j_shift) // y_sector_size, y_sectors - 2) * m0 = int_max(m0, 0) * m1 = m0 + 1 # <<<<<<<<<<<<<< * * m_j[j] = m0 */ __pyx_v_m1 = (__pyx_v_m0 + 1); /* "pysteps/motion/_vet.pyx":432 * m1 = m0 + 1 * * m_j[j] = m0 # <<<<<<<<<<<<<< * * sector_area = (x_guess[l1] - x_guess[l0]) * (y_guess[m1] - y_guess[m0]) */ __pyx_t_30 = __pyx_v_j; *__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, 
__pyx_pybuffernd_m_j.rcbuffer->pybuffer.buf, __pyx_t_30, __pyx_pybuffernd_m_j.diminfo[0].strides) = __pyx_v_m0; /* "pysteps/motion/_vet.pyx":434 * m_j[j] = m0 * * sector_area = (x_guess[l1] - x_guess[l0]) * (y_guess[m1] - y_guess[m0]) # <<<<<<<<<<<<<< * * interp_coef[0, i, j] = (x_guess[l1] * y_guess[m1] */ __pyx_t_31 = __pyx_v_l1; __pyx_t_32 = __pyx_v_l0; __pyx_t_33 = __pyx_v_m1; __pyx_t_34 = __pyx_v_m0; __pyx_v_sector_area = (((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_31, __pyx_pybuffernd_x_guess.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_32, __pyx_pybuffernd_x_guess.diminfo[0].strides))) * ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_33, __pyx_pybuffernd_y_guess.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_34, __pyx_pybuffernd_y_guess.diminfo[0].strides)))); /* "pysteps/motion/_vet.pyx":436 * sector_area = (x_guess[l1] - x_guess[l0]) * (y_guess[m1] - y_guess[m0]) * * interp_coef[0, i, j] = (x_guess[l1] * y_guess[m1] # <<<<<<<<<<<<<< * - x[i] * y_guess[m1] * - x_guess[l1] * y[j] */ __pyx_t_35 = __pyx_v_l1; __pyx_t_36 = __pyx_v_m1; /* "pysteps/motion/_vet.pyx":437 * * interp_coef[0, i, j] = (x_guess[l1] * y_guess[m1] * - x[i] * y_guess[m1] # <<<<<<<<<<<<<< * - x_guess[l1] * y[j] * + x[i] * y[j]) / sector_area */ __pyx_t_37 = __pyx_v_i; __pyx_t_38 = __pyx_v_m1; /* "pysteps/motion/_vet.pyx":438 * interp_coef[0, i, j] = (x_guess[l1] * y_guess[m1] * - x[i] * y_guess[m1] * - x_guess[l1] * y[j] # <<<<<<<<<<<<<< * + x[i] * y[j]) / sector_area * */ __pyx_t_39 = __pyx_v_l1; __pyx_t_40 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":439 * - x[i] * y_guess[m1] * - x_guess[l1] * y[j] * + x[i] * y[j]) / sector_area # 
<<<<<<<<<<<<<< * * interp_coef[1, i, j] = (-x_guess[l1] * y_guess[m0] */ __pyx_t_41 = __pyx_v_i; __pyx_t_42 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":436 * sector_area = (x_guess[l1] - x_guess[l0]) * (y_guess[m1] - y_guess[m0]) * * interp_coef[0, i, j] = (x_guess[l1] * y_guess[m1] # <<<<<<<<<<<<<< * - x[i] * y_guess[m1] * - x_guess[l1] * y[j] */ __pyx_t_43 = 0; __pyx_t_44 = __pyx_v_i; __pyx_t_45 = __pyx_v_j; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_43, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_44, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_45, __pyx_pybuffernd_interp_coef.diminfo[2].strides) = ((((((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_35, __pyx_pybuffernd_x_guess.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_36, __pyx_pybuffernd_y_guess.diminfo[0].strides))) - ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_37, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_38, __pyx_pybuffernd_y_guess.diminfo[0].strides)))) - ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_39, __pyx_pybuffernd_x_guess.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_40, __pyx_pybuffernd_y.diminfo[0].strides)))) + ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_41, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, 
__pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_42, __pyx_pybuffernd_y.diminfo[0].strides)))) / __pyx_v_sector_area); /* "pysteps/motion/_vet.pyx":441 * + x[i] * y[j]) / sector_area * * interp_coef[1, i, j] = (-x_guess[l1] * y_guess[m0] # <<<<<<<<<<<<<< * + x[i] * y_guess[m0] * + x_guess[l1] * y[j] */ __pyx_t_46 = __pyx_v_l1; __pyx_t_47 = __pyx_v_m0; /* "pysteps/motion/_vet.pyx":442 * * interp_coef[1, i, j] = (-x_guess[l1] * y_guess[m0] * + x[i] * y_guess[m0] # <<<<<<<<<<<<<< * + x_guess[l1] * y[j] * - x[i] * y[j]) / sector_area */ __pyx_t_48 = __pyx_v_i; __pyx_t_49 = __pyx_v_m0; /* "pysteps/motion/_vet.pyx":443 * interp_coef[1, i, j] = (-x_guess[l1] * y_guess[m0] * + x[i] * y_guess[m0] * + x_guess[l1] * y[j] # <<<<<<<<<<<<<< * - x[i] * y[j]) / sector_area * */ __pyx_t_50 = __pyx_v_l1; __pyx_t_51 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":444 * + x[i] * y_guess[m0] * + x_guess[l1] * y[j] * - x[i] * y[j]) / sector_area # <<<<<<<<<<<<<< * * interp_coef[2, i, j] = (-x_guess[l0] * y_guess[m1] */ __pyx_t_52 = __pyx_v_i; __pyx_t_53 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":441 * + x[i] * y[j]) / sector_area * * interp_coef[1, i, j] = (-x_guess[l1] * y_guess[m0] # <<<<<<<<<<<<<< * + x[i] * y_guess[m0] * + x_guess[l1] * y[j] */ __pyx_t_54 = 1; __pyx_t_55 = __pyx_v_i; __pyx_t_56 = __pyx_v_j; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_54, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_55, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_56, __pyx_pybuffernd_interp_coef.diminfo[2].strides) = ((((((-(*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_46, __pyx_pybuffernd_x_guess.diminfo[0].strides))) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_47, __pyx_pybuffernd_y_guess.diminfo[0].strides))) + 
((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_48, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_49, __pyx_pybuffernd_y_guess.diminfo[0].strides)))) + ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_50, __pyx_pybuffernd_x_guess.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_51, __pyx_pybuffernd_y.diminfo[0].strides)))) - ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_52, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_53, __pyx_pybuffernd_y.diminfo[0].strides)))) / __pyx_v_sector_area); /* "pysteps/motion/_vet.pyx":446 * - x[i] * y[j]) / sector_area * * interp_coef[2, i, j] = (-x_guess[l0] * y_guess[m1] # <<<<<<<<<<<<<< * + x[i] * y_guess[m1] * + x_guess[l0] * y[j] */ __pyx_t_57 = __pyx_v_l0; __pyx_t_58 = __pyx_v_m1; /* "pysteps/motion/_vet.pyx":447 * * interp_coef[2, i, j] = (-x_guess[l0] * y_guess[m1] * + x[i] * y_guess[m1] # <<<<<<<<<<<<<< * + x_guess[l0] * y[j] * - x[i] * y[j]) / sector_area */ __pyx_t_59 = __pyx_v_i; __pyx_t_60 = __pyx_v_m1; /* "pysteps/motion/_vet.pyx":448 * interp_coef[2, i, j] = (-x_guess[l0] * y_guess[m1] * + x[i] * y_guess[m1] * + x_guess[l0] * y[j] # <<<<<<<<<<<<<< * - x[i] * y[j]) / sector_area * */ __pyx_t_61 = __pyx_v_l0; __pyx_t_62 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":449 * + x[i] * y_guess[m1] * + x_guess[l0] * y[j] * - x[i] * y[j]) / sector_area # <<<<<<<<<<<<<< * * interp_coef[3, i, j] = (x_guess[l0] * y_guess[m0] */ __pyx_t_63 = __pyx_v_i; __pyx_t_64 = __pyx_v_j; /* 
"pysteps/motion/_vet.pyx":446 * - x[i] * y[j]) / sector_area * * interp_coef[2, i, j] = (-x_guess[l0] * y_guess[m1] # <<<<<<<<<<<<<< * + x[i] * y_guess[m1] * + x_guess[l0] * y[j] */ __pyx_t_65 = 2; __pyx_t_66 = __pyx_v_i; __pyx_t_67 = __pyx_v_j; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_65, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_66, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_67, __pyx_pybuffernd_interp_coef.diminfo[2].strides) = ((((((-(*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_57, __pyx_pybuffernd_x_guess.diminfo[0].strides))) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_58, __pyx_pybuffernd_y_guess.diminfo[0].strides))) + ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_59, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_60, __pyx_pybuffernd_y_guess.diminfo[0].strides)))) + ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_61, __pyx_pybuffernd_x_guess.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_62, __pyx_pybuffernd_y.diminfo[0].strides)))) - ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_63, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_64, __pyx_pybuffernd_y.diminfo[0].strides)))) / __pyx_v_sector_area); /* "pysteps/motion/_vet.pyx":451 * - x[i] * y[j]) / 
sector_area * * interp_coef[3, i, j] = (x_guess[l0] * y_guess[m0] # <<<<<<<<<<<<<< * - x[i] * y_guess[m0] * - x_guess[l0] * y[j] */ __pyx_t_68 = __pyx_v_l0; __pyx_t_69 = __pyx_v_m0; /* "pysteps/motion/_vet.pyx":452 * * interp_coef[3, i, j] = (x_guess[l0] * y_guess[m0] * - x[i] * y_guess[m0] # <<<<<<<<<<<<<< * - x_guess[l0] * y[j] * + x[i] * y[j]) / sector_area */ __pyx_t_70 = __pyx_v_i; __pyx_t_71 = __pyx_v_m0; /* "pysteps/motion/_vet.pyx":453 * interp_coef[3, i, j] = (x_guess[l0] * y_guess[m0] * - x[i] * y_guess[m0] * - x_guess[l0] * y[j] # <<<<<<<<<<<<<< * + x[i] * y[j]) / sector_area * */ __pyx_t_72 = __pyx_v_l0; __pyx_t_73 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":454 * - x[i] * y_guess[m0] * - x_guess[l0] * y[j] * + x[i] * y[j]) / sector_area # <<<<<<<<<<<<<< * * for xy in range(2): */ __pyx_t_74 = __pyx_v_i; __pyx_t_75 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":451 * - x[i] * y[j]) / sector_area * * interp_coef[3, i, j] = (x_guess[l0] * y_guess[m0] # <<<<<<<<<<<<<< * - x[i] * y_guess[m0] * - x_guess[l0] * y[j] */ __pyx_t_76 = 3; __pyx_t_77 = __pyx_v_i; __pyx_t_78 = __pyx_v_j; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_76, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_77, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_78, __pyx_pybuffernd_interp_coef.diminfo[2].strides) = ((((((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_68, __pyx_pybuffernd_x_guess.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_69, __pyx_pybuffernd_y_guess.diminfo[0].strides))) - ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_70, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, 
__pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_71, __pyx_pybuffernd_y_guess.diminfo[0].strides)))) - ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_72, __pyx_pybuffernd_x_guess.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_73, __pyx_pybuffernd_y.diminfo[0].strides)))) + ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_74, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_75, __pyx_pybuffernd_y.diminfo[0].strides)))) / __pyx_v_sector_area); /* "pysteps/motion/_vet.pyx":456 * + x[i] * y[j]) / sector_area * * for xy in range(2): # <<<<<<<<<<<<<< * displacement[xy, i, j] = ( * sector_displacement[xy, l0, m0] * interp_coef[0, i, j] */ for (__pyx_t_79 = 0; __pyx_t_79 < 2; __pyx_t_79+=1) { __pyx_v_xy = __pyx_t_79; /* "pysteps/motion/_vet.pyx":458 * for xy in range(2): * displacement[xy, i, j] = ( * sector_displacement[xy, l0, m0] * interp_coef[0, i, j] # <<<<<<<<<<<<<< * + sector_displacement[xy, l0, m1] * interp_coef[1, i, j] * + sector_displacement[xy, l1, m0] * interp_coef[2, i, j] */ __pyx_t_80 = __pyx_v_xy; __pyx_t_81 = __pyx_v_l0; __pyx_t_82 = __pyx_v_m0; __pyx_t_83 = 0; __pyx_t_84 = __pyx_v_i; __pyx_t_85 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":459 * displacement[xy, i, j] = ( * sector_displacement[xy, l0, m0] * interp_coef[0, i, j] * + sector_displacement[xy, l0, m1] * interp_coef[1, i, j] # <<<<<<<<<<<<<< * + sector_displacement[xy, l1, m0] * interp_coef[2, i, j] * + sector_displacement[xy, l1, m1] * interp_coef[3, i, j] */ __pyx_t_86 = __pyx_v_xy; __pyx_t_87 = __pyx_v_l0; __pyx_t_88 = __pyx_v_m1; __pyx_t_89 = 1; __pyx_t_90 = __pyx_v_i; __pyx_t_91 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":460 * 
sector_displacement[xy, l0, m0] * interp_coef[0, i, j] * + sector_displacement[xy, l0, m1] * interp_coef[1, i, j] * + sector_displacement[xy, l1, m0] * interp_coef[2, i, j] # <<<<<<<<<<<<<< * + sector_displacement[xy, l1, m1] * interp_coef[3, i, j] * ) */ __pyx_t_92 = __pyx_v_xy; __pyx_t_93 = __pyx_v_l1; __pyx_t_94 = __pyx_v_m0; __pyx_t_95 = 2; __pyx_t_96 = __pyx_v_i; __pyx_t_97 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":461 * + sector_displacement[xy, l0, m1] * interp_coef[1, i, j] * + sector_displacement[xy, l1, m0] * interp_coef[2, i, j] * + sector_displacement[xy, l1, m1] * interp_coef[3, i, j] # <<<<<<<<<<<<<< * ) * */ __pyx_t_98 = __pyx_v_xy; __pyx_t_99 = __pyx_v_l1; __pyx_t_100 = __pyx_v_m1; __pyx_t_101 = 3; __pyx_t_102 = __pyx_v_i; __pyx_t_103 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":457 * * for xy in range(2): * displacement[xy, i, j] = ( # <<<<<<<<<<<<<< * sector_displacement[xy, l0, m0] * interp_coef[0, i, j] * + sector_displacement[xy, l0, m1] * interp_coef[1, i, j] */ __pyx_t_104 = __pyx_v_xy; __pyx_t_105 = __pyx_v_i; __pyx_t_106 = __pyx_v_j; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_displacement.rcbuffer->pybuffer.buf, __pyx_t_104, __pyx_pybuffernd_displacement.diminfo[0].strides, __pyx_t_105, __pyx_pybuffernd_displacement.diminfo[1].strides, __pyx_t_106, __pyx_pybuffernd_displacement.diminfo[2].strides) = (((((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_80, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_81, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_82, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_83, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_84, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_85, 
__pyx_pybuffernd_interp_coef.diminfo[2].strides))) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_86, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_87, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_88, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_89, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_90, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_91, __pyx_pybuffernd_interp_coef.diminfo[2].strides)))) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_92, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_93, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_94, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_95, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_96, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_97, __pyx_pybuffernd_interp_coef.diminfo[2].strides)))) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_98, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_99, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_100, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_101, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_102, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_103, __pyx_pybuffernd_interp_coef.diminfo[2].strides)))); } } } } 
} } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "pysteps/motion/_vet.pyx":419 * dtype=np.intp) * * for i in prange(x_image_size, schedule='dynamic', nogil=True): # <<<<<<<<<<<<<< * * l0 = int_min((i - i_shift) // x_sector_size, x_sectors - 2) */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L7; } __pyx_L7:; } } /* "pysteps/motion/_vet.pyx":464 * ) * * for l, i, counts in zip(*np.unique(l_i, # <<<<<<<<<<<<<< * return_index=True, * return_counts=True)): */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_unique); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_l_i)); __Pyx_GIVEREF(((PyObject *)__pyx_v_l_i)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_l_i)); /* "pysteps/motion/_vet.pyx":465 * * for l, i, counts in zip(*np.unique(l_i, * return_index=True, # <<<<<<<<<<<<<< * return_counts=True)): * i_min[l] = i */ __pyx_t_3 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 465, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_return_index, Py_True) < 0) __PYX_ERR(0, 465, __pyx_L1_error) /* "pysteps/motion/_vet.pyx":466 * for l, i, counts in zip(*np.unique(l_i, * return_index=True, * return_counts=True)): # <<<<<<<<<<<<<< * i_min[l] = i * i_max[l] = i + counts - 1 */ if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_return_counts, Py_True) < 0) __PYX_ERR(0, 465, 
__pyx_L1_error) /* "pysteps/motion/_vet.pyx":464 * ) * * for l, i, counts in zip(*np.unique(l_i, # <<<<<<<<<<<<<< * return_index=True, * return_counts=True)): */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PySequence_Tuple(__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_zip, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (likely(PyList_CheckExact(__pyx_t_4)) || PyTuple_CheckExact(__pyx_t_4)) { __pyx_t_3 = __pyx_t_4; __Pyx_INCREF(__pyx_t_3); __pyx_t_107 = 0; __pyx_t_108 = NULL; } else { __pyx_t_107 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_108 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_108)) __PYX_ERR(0, 464, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; for (;;) { if (likely(!__pyx_t_108)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_107 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_107); __Pyx_INCREF(__pyx_t_4); __pyx_t_107++; if (unlikely(0 < 0)) __PYX_ERR(0, 464, __pyx_L1_error) #else __pyx_t_4 = PySequence_ITEM(__pyx_t_3, __pyx_t_107); __pyx_t_107++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { if (__pyx_t_107 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_107); __Pyx_INCREF(__pyx_t_4); __pyx_t_107++; if 
(unlikely(0 < 0)) __PYX_ERR(0, 464, __pyx_L1_error) #else __pyx_t_4 = PySequence_ITEM(__pyx_t_3, __pyx_t_107); __pyx_t_107++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } } else { __pyx_t_4 = __pyx_t_108(__pyx_t_3); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 464, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_4); } if ((likely(PyTuple_CheckExact(__pyx_t_4))) || (PyList_CheckExact(__pyx_t_4))) { PyObject* sequence = __pyx_t_4; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 3)) { if (size > 3) __Pyx_RaiseTooManyValuesError(3); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 464, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __pyx_t_6 = PyTuple_GET_ITEM(sequence, 2); } else { __pyx_t_2 = PyList_GET_ITEM(sequence, 0); __pyx_t_5 = PyList_GET_ITEM(sequence, 1); __pyx_t_6 = PyList_GET_ITEM(sequence, 2); } __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); #else __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); #endif __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else { Py_ssize_t index = -1; __pyx_t_109 = PyObject_GetIter(__pyx_t_4); if (unlikely(!__pyx_t_109)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_109); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_110 = Py_TYPE(__pyx_t_109)->tp_iternext; index = 0; 
__pyx_t_2 = __pyx_t_110(__pyx_t_109); if (unlikely(!__pyx_t_2)) goto __pyx_L20_unpacking_failed; __Pyx_GOTREF(__pyx_t_2); index = 1; __pyx_t_5 = __pyx_t_110(__pyx_t_109); if (unlikely(!__pyx_t_5)) goto __pyx_L20_unpacking_failed; __Pyx_GOTREF(__pyx_t_5); index = 2; __pyx_t_6 = __pyx_t_110(__pyx_t_109); if (unlikely(!__pyx_t_6)) goto __pyx_L20_unpacking_failed; __Pyx_GOTREF(__pyx_t_6); if (__Pyx_IternextUnpackEndCheck(__pyx_t_110(__pyx_t_109), 3) < 0) __PYX_ERR(0, 464, __pyx_L1_error) __pyx_t_110 = NULL; __Pyx_DECREF(__pyx_t_109); __pyx_t_109 = 0; goto __pyx_L21_unpacking_done; __pyx_L20_unpacking_failed:; __Pyx_DECREF(__pyx_t_109); __pyx_t_109 = 0; __pyx_t_110 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 464, __pyx_L1_error) __pyx_L21_unpacking_done:; } __pyx_t_25 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_2); if (unlikely((__pyx_t_25 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_24 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_5); if (unlikely((__pyx_t_24 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_l = __pyx_t_25; __pyx_v_i = __pyx_t_24; __Pyx_XDECREF_SET(__pyx_v_counts, __pyx_t_6); __pyx_t_6 = 0; /* "pysteps/motion/_vet.pyx":467 * return_index=True, * return_counts=True)): * i_min[l] = i # <<<<<<<<<<<<<< * i_max[l] = i + counts - 1 * */ __pyx_t_111 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_min.rcbuffer->pybuffer.buf, __pyx_t_111, __pyx_pybuffernd_i_min.diminfo[0].strides) = __pyx_v_i; /* "pysteps/motion/_vet.pyx":468 * return_counts=True)): * i_min[l] = i * i_max[l] = i + counts - 1 # <<<<<<<<<<<<<< * * for m, j, counts in zip(*np.unique(m_j, */ __pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_i); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 468, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_4, 
__pyx_v_counts); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 468, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyInt_SubtractObjC(__pyx_t_6, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 468, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_24 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_4); if (unlikely((__pyx_t_24 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 468, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_112 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_max.rcbuffer->pybuffer.buf, __pyx_t_112, __pyx_pybuffernd_i_max.diminfo[0].strides) = __pyx_t_24; /* "pysteps/motion/_vet.pyx":464 * ) * * for l, i, counts in zip(*np.unique(l_i, # <<<<<<<<<<<<<< * return_index=True, * return_counts=True)): */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "pysteps/motion/_vet.pyx":470 * i_max[l] = i + counts - 1 * * for m, j, counts in zip(*np.unique(m_j, # <<<<<<<<<<<<<< * return_index=True, * return_counts=True)): */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_unique); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_m_j)); __Pyx_GIVEREF(((PyObject *)__pyx_v_m_j)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_m_j)); /* "pysteps/motion/_vet.pyx":471 * * for m, j, counts in zip(*np.unique(m_j, * return_index=True, # <<<<<<<<<<<<<< * return_counts=True)): * j_min[m] = j */ __pyx_t_6 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 471, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_6, 
__pyx_n_s_return_index, Py_True) < 0) __PYX_ERR(0, 471, __pyx_L1_error) /* "pysteps/motion/_vet.pyx":472 * for m, j, counts in zip(*np.unique(m_j, * return_index=True, * return_counts=True)): # <<<<<<<<<<<<<< * j_min[m] = j * j_max[m] = j + counts */ if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_return_counts, Py_True) < 0) __PYX_ERR(0, 471, __pyx_L1_error) /* "pysteps/motion/_vet.pyx":470 * i_max[l] = i + counts - 1 * * for m, j, counts in zip(*np.unique(m_j, # <<<<<<<<<<<<<< * return_index=True, * return_counts=True)): */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PySequence_Tuple(__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_zip, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_t_5)) || PyTuple_CheckExact(__pyx_t_5)) { __pyx_t_6 = __pyx_t_5; __Pyx_INCREF(__pyx_t_6); __pyx_t_107 = 0; __pyx_t_108 = NULL; } else { __pyx_t_107 = -1; __pyx_t_6 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_108 = Py_TYPE(__pyx_t_6)->tp_iternext; if (unlikely(!__pyx_t_108)) __PYX_ERR(0, 470, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; for (;;) { if (likely(!__pyx_t_108)) { if (likely(PyList_CheckExact(__pyx_t_6))) { if (__pyx_t_107 >= PyList_GET_SIZE(__pyx_t_6)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_6, __pyx_t_107); __Pyx_INCREF(__pyx_t_5); __pyx_t_107++; if (unlikely(0 < 0)) __PYX_ERR(0, 470, __pyx_L1_error) #else __pyx_t_5 = 
PySequence_ITEM(__pyx_t_6, __pyx_t_107); __pyx_t_107++; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_107 >= PyTuple_GET_SIZE(__pyx_t_6)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_6, __pyx_t_107); __Pyx_INCREF(__pyx_t_5); __pyx_t_107++; if (unlikely(0 < 0)) __PYX_ERR(0, 470, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_6, __pyx_t_107); __pyx_t_107++; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_108(__pyx_t_6); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 470, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } if ((likely(PyTuple_CheckExact(__pyx_t_5))) || (PyList_CheckExact(__pyx_t_5))) { PyObject* sequence = __pyx_t_5; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 3)) { if (size > 3) __Pyx_RaiseTooManyValuesError(3); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 470, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __pyx_t_2 = PyTuple_GET_ITEM(sequence, 2); } else { __pyx_t_3 = PyList_GET_ITEM(sequence, 0); __pyx_t_4 = PyList_GET_ITEM(sequence, 1); __pyx_t_2 = PyList_GET_ITEM(sequence, 2); } __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_2); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); #endif __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else { Py_ssize_t index = -1; __pyx_t_109 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_109)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_109); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_110 = Py_TYPE(__pyx_t_109)->tp_iternext; index = 0; __pyx_t_3 = __pyx_t_110(__pyx_t_109); if (unlikely(!__pyx_t_3)) goto __pyx_L24_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_4 = __pyx_t_110(__pyx_t_109); if (unlikely(!__pyx_t_4)) goto __pyx_L24_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); index = 2; __pyx_t_2 = __pyx_t_110(__pyx_t_109); if (unlikely(!__pyx_t_2)) goto __pyx_L24_unpacking_failed; __Pyx_GOTREF(__pyx_t_2); if (__Pyx_IternextUnpackEndCheck(__pyx_t_110(__pyx_t_109), 3) < 0) __PYX_ERR(0, 470, __pyx_L1_error) __pyx_t_110 = NULL; __Pyx_DECREF(__pyx_t_109); __pyx_t_109 = 0; goto __pyx_L25_unpacking_done; __pyx_L24_unpacking_failed:; __Pyx_DECREF(__pyx_t_109); __pyx_t_109 = 0; __pyx_t_110 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 470, __pyx_L1_error) __pyx_L25_unpacking_done:; } __pyx_t_24 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_3); if (unlikely((__pyx_t_24 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_25 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_4); if (unlikely((__pyx_t_25 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_m = __pyx_t_24; __pyx_v_j = __pyx_t_25; __Pyx_XDECREF_SET(__pyx_v_counts, __pyx_t_2); __pyx_t_2 = 0; /* "pysteps/motion/_vet.pyx":473 * return_index=True, * return_counts=True)): * j_min[m] = j # <<<<<<<<<<<<<< * j_max[m] = j + counts * */ __pyx_t_113 = __pyx_v_m; *__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_min.rcbuffer->pybuffer.buf, __pyx_t_113, __pyx_pybuffernd_j_min.diminfo[0].strides) 
= __pyx_v_j; /* "pysteps/motion/_vet.pyx":474 * return_counts=True)): * j_min[m] = j * j_max[m] = j + counts # <<<<<<<<<<<<<< * * cdef np.ndarray[float64, ndim = 2] morphed_image */ __pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_j); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 474, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = PyNumber_Add(__pyx_t_5, __pyx_v_counts); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 474, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_25 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_2); if (unlikely((__pyx_t_25 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 474, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_114 = __pyx_v_m; *__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_max.rcbuffer->pybuffer.buf, __pyx_t_114, __pyx_pybuffernd_j_max.diminfo[0].strides) = __pyx_t_25; /* "pysteps/motion/_vet.pyx":470 * i_max[l] = i + counts - 1 * * for m, j, counts in zip(*np.unique(m_j, # <<<<<<<<<<<<<< * return_index=True, * return_counts=True)): */ } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "pysteps/motion/_vet.pyx":483 * * cdef np.ndarray[float64, ndim = 2] buffer = \ * np.zeros([x_image_size, y_image_size], dtype=np.float64) # <<<<<<<<<<<<<< * * grad_smooth = np.zeros([2, x_sectors, y_sectors], dtype=np.float64) */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyList_New(2); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_6); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_4, 1, __pyx_t_5); __pyx_t_6 = 0; __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_float64); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_3) < 0) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 483, __pyx_L1_error) __pyx_t_115 = ((PyArrayObject *)__pyx_t_3); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_buffer.rcbuffer->pybuffer, (PyObject*)__pyx_t_115, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { __pyx_v_buffer = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_buffer.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 482, __pyx_L1_error) } else {__pyx_pybuffernd_buffer.diminfo[0].strides = 
__pyx_pybuffernd_buffer.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_buffer.diminfo[0].shape = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_buffer.diminfo[1].strides = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_buffer.diminfo[1].shape = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.shape[1]; } } __pyx_t_115 = 0; __pyx_v_buffer = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "pysteps/motion/_vet.pyx":485 * np.zeros([x_image_size, y_image_size], dtype=np.float64) * * grad_smooth = np.zeros([2, x_sectors, y_sectors], dtype=np.float64) # <<<<<<<<<<<<<< * * grad_residuals = np.zeros([2, x_sectors, y_sectors], dtype=np.float64) */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_sectors); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_sectors); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = PyList_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_int_2); __Pyx_GIVEREF(__pyx_int_2); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_int_2); __Pyx_GIVEREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 485, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_2); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_float64); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 485, __pyx_L1_error) __pyx_t_116 = ((PyArrayObject *)__pyx_t_6); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer, (PyObject*)__pyx_t_116, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer, (PyObject*)__pyx_v_grad_smooth, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10); } __pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0; } __pyx_pybuffernd_grad_smooth.diminfo[0].strides = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.strides[0]; 
__pyx_pybuffernd_grad_smooth.diminfo[0].shape = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_grad_smooth.diminfo[1].strides = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_grad_smooth.diminfo[1].shape = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_grad_smooth.diminfo[2].strides = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_grad_smooth.diminfo[2].shape = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.shape[2]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 485, __pyx_L1_error) } __pyx_t_116 = 0; __pyx_v_grad_smooth = ((PyArrayObject *)__pyx_t_6); __pyx_t_6 = 0; /* "pysteps/motion/_vet.pyx":487 * grad_smooth = np.zeros([2, x_sectors, y_sectors], dtype=np.float64) * * grad_residuals = np.zeros([2, x_sectors, y_sectors], dtype=np.float64) # <<<<<<<<<<<<<< * * cdef float64 residuals = 0 */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_sectors); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_sectors); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyList_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_int_2); __Pyx_GIVEREF(__pyx_int_2); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_int_2); __Pyx_GIVEREF(__pyx_t_6); PyList_SET_ITEM(__pyx_t_4, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_4, 2, __pyx_t_5); __pyx_t_6 = 0; __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 487, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_float64); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_3) < 0) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 487, __pyx_L1_error) __pyx_t_117 = ((PyArrayObject *)__pyx_t_3); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer, (PyObject*)__pyx_t_117, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer, (PyObject*)__pyx_v_grad_residuals, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12); __Pyx_RaiseBufferFallbackError(); } else { 
PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0; } __pyx_pybuffernd_grad_residuals.diminfo[0].strides = __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_grad_residuals.diminfo[0].shape = __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_grad_residuals.diminfo[1].strides = __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_grad_residuals.diminfo[1].shape = __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_grad_residuals.diminfo[2].strides = __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_grad_residuals.diminfo[2].shape = __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.shape[2]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 487, __pyx_L1_error) } __pyx_t_117 = 0; __pyx_v_grad_residuals = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "pysteps/motion/_vet.pyx":489 * grad_residuals = np.zeros([2, x_sectors, y_sectors], dtype=np.float64) * * cdef float64 residuals = 0 # <<<<<<<<<<<<<< * * # Compute residual part of the cost function */ __pyx_v_residuals = 0.0; /* "pysteps/motion/_vet.pyx":492 * * # Compute residual part of the cost function * if gradient: # <<<<<<<<<<<<<< * * morphed_image, morph_mask, _gradient_data = _warp(template_image, */ __pyx_t_1 = (__pyx_v_gradient != 0); if (__pyx_t_1) { /* "pysteps/motion/_vet.pyx":494 * if gradient: * * morphed_image, morph_mask, _gradient_data = _warp(template_image, # <<<<<<<<<<<<<< * mask, * displacement, */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_warp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 494, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "pysteps/motion/_vet.pyx":496 * morphed_image, morph_mask, _gradient_data = _warp(template_image, * mask, * displacement, # <<<<<<<<<<<<<< * gradient=True) * */ __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 494, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); 
__Pyx_INCREF(((PyObject *)__pyx_v_template_image)); __Pyx_GIVEREF(((PyObject *)__pyx_v_template_image)); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_template_image)); __Pyx_INCREF(((PyObject *)__pyx_v_mask)); __Pyx_GIVEREF(((PyObject *)__pyx_v_mask)); PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_v_mask)); __Pyx_INCREF(((PyObject *)__pyx_v_displacement)); __Pyx_GIVEREF(((PyObject *)__pyx_v_displacement)); PyTuple_SET_ITEM(__pyx_t_4, 2, ((PyObject *)__pyx_v_displacement)); /* "pysteps/motion/_vet.pyx":497 * mask, * displacement, * gradient=True) # <<<<<<<<<<<<<< * * morph_mask[mask > 0] = 1 */ __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 497, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_gradient, Py_True) < 0) __PYX_ERR(0, 497, __pyx_L1_error) /* "pysteps/motion/_vet.pyx":494 * if gradient: * * morphed_image, morph_mask, _gradient_data = _warp(template_image, # <<<<<<<<<<<<<< * mask, * displacement, */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 494, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if ((likely(PyTuple_CheckExact(__pyx_t_2))) || (PyList_CheckExact(__pyx_t_2))) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 3)) { if (size > 3) __Pyx_RaiseTooManyValuesError(3); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 494, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_5 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __pyx_t_3 = PyTuple_GET_ITEM(sequence, 2); } else { __pyx_t_5 = PyList_GET_ITEM(sequence, 0); __pyx_t_4 = PyList_GET_ITEM(sequence, 1); __pyx_t_3 = PyList_GET_ITEM(sequence, 2); } __Pyx_INCREF(__pyx_t_5); 
__Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_3); #else __pyx_t_5 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 494, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 494, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 494, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { Py_ssize_t index = -1; __pyx_t_6 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 494, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_110 = Py_TYPE(__pyx_t_6)->tp_iternext; index = 0; __pyx_t_5 = __pyx_t_110(__pyx_t_6); if (unlikely(!__pyx_t_5)) goto __pyx_L27_unpacking_failed; __Pyx_GOTREF(__pyx_t_5); index = 1; __pyx_t_4 = __pyx_t_110(__pyx_t_6); if (unlikely(!__pyx_t_4)) goto __pyx_L27_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); index = 2; __pyx_t_3 = __pyx_t_110(__pyx_t_6); if (unlikely(!__pyx_t_3)) goto __pyx_L27_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); if (__Pyx_IternextUnpackEndCheck(__pyx_t_110(__pyx_t_6), 3) < 0) __PYX_ERR(0, 494, __pyx_L1_error) __pyx_t_110 = NULL; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L28_unpacking_done; __pyx_L27_unpacking_failed:; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_110 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 494, __pyx_L1_error) __pyx_L28_unpacking_done:; } if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 494, __pyx_L1_error) if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 494, __pyx_L1_error) if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 494, __pyx_L1_error) __pyx_t_118 = ((PyArrayObject *)__pyx_t_5); { 
__Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer, (PyObject*)__pyx_t_118, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer, (PyObject*)__pyx_v_morphed_image, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10); } __pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0; } __pyx_pybuffernd_morphed_image.diminfo[0].strides = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_morphed_image.diminfo[0].shape = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_morphed_image.diminfo[1].strides = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_morphed_image.diminfo[1].shape = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 494, __pyx_L1_error) } __pyx_t_118 = 0; __pyx_v_morphed_image = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; __pyx_t_119 = ((PyArrayObject *)__pyx_t_4); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer, (PyObject*)__pyx_t_119, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if 
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer, (PyObject*)__pyx_v_morph_mask, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0; } __pyx_pybuffernd_morph_mask.diminfo[0].strides = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_morph_mask.diminfo[0].shape = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_morph_mask.diminfo[1].strides = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_morph_mask.diminfo[1].shape = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 494, __pyx_L1_error) } __pyx_t_119 = 0; __pyx_v_morph_mask = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0; __pyx_t_120 = ((PyArrayObject *)__pyx_t_3); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__gradient_data.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd__gradient_data.rcbuffer->pybuffer, (PyObject*)__pyx_t_120, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd__gradient_data.rcbuffer->pybuffer, (PyObject*)__pyx_v__gradient_data, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10); } __pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0; } __pyx_pybuffernd__gradient_data.diminfo[0].strides = 
__pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd__gradient_data.diminfo[0].shape = __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd__gradient_data.diminfo[1].strides = __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd__gradient_data.diminfo[1].shape = __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd__gradient_data.diminfo[2].strides = __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd__gradient_data.diminfo[2].shape = __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.shape[2]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 494, __pyx_L1_error) } __pyx_t_120 = 0; __pyx_v__gradient_data = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "pysteps/motion/_vet.pyx":499 * gradient=True) * * morph_mask[mask > 0] = 1 # <<<<<<<<<<<<<< * * buffer = (2 * (input_image - morphed_image)) */ __pyx_t_2 = PyObject_RichCompare(((PyObject *)__pyx_v_mask), __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 499, __pyx_L1_error) if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_morph_mask), __pyx_t_2, __pyx_int_1) < 0)) __PYX_ERR(0, 499, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "pysteps/motion/_vet.pyx":501 * morph_mask[mask > 0] = 1 * * buffer = (2 * (input_image - morphed_image)) # <<<<<<<<<<<<<< * buffer[morph_mask == 1] = 0 * */ __pyx_t_2 = PyNumber_Subtract(((PyObject *)__pyx_v_input_image), ((PyObject *)__pyx_v_morphed_image)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_int_2, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 501, __pyx_L1_error) __pyx_t_115 = ((PyArrayObject *)__pyx_t_3); { 
__Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_buffer.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_buffer.rcbuffer->pybuffer, (PyObject*)__pyx_t_115, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_buffer.rcbuffer->pybuffer, (PyObject*)__pyx_v_buffer, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0; } __pyx_pybuffernd_buffer.diminfo[0].strides = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_buffer.diminfo[0].shape = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_buffer.diminfo[1].strides = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_buffer.diminfo[1].shape = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 501, __pyx_L1_error) } __pyx_t_115 = 0; __Pyx_DECREF_SET(__pyx_v_buffer, ((PyArrayObject *)__pyx_t_3)); __pyx_t_3 = 0; /* "pysteps/motion/_vet.pyx":502 * * buffer = (2 * (input_image - morphed_image)) * buffer[morph_mask == 1] = 0 # <<<<<<<<<<<<<< * * _gradient_data[0, :] *= buffer */ __pyx_t_3 = PyObject_RichCompare(((PyObject *)__pyx_v_morph_mask), __pyx_int_1, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 502, __pyx_L1_error) if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_buffer), __pyx_t_3, __pyx_int_0) < 0)) __PYX_ERR(0, 502, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "pysteps/motion/_vet.pyx":504 * buffer[morph_mask == 1] = 0 * * _gradient_data[0, :] *= 
buffer # <<<<<<<<<<<<<< * _gradient_data[1, :] *= buffer * */ __Pyx_INCREF(__pyx_tuple__4); __pyx_t_121 = __pyx_tuple__4; __pyx_t_3 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v__gradient_data), __pyx_t_121); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyNumber_InPlaceMultiply(__pyx_t_3, ((PyObject *)__pyx_v_buffer)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v__gradient_data), __pyx_t_121, __pyx_t_2) < 0)) __PYX_ERR(0, 504, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_121); __pyx_t_121 = 0; /* "pysteps/motion/_vet.pyx":505 * * _gradient_data[0, :] *= buffer * _gradient_data[1, :] *= buffer # <<<<<<<<<<<<<< * * for l in range(x_sectors): # schedule='dynamic', nogil=True): */ __Pyx_INCREF(__pyx_tuple__5); __pyx_t_121 = __pyx_tuple__5; __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v__gradient_data), __pyx_t_121); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 505, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, ((PyObject *)__pyx_v_buffer)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 505, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v__gradient_data), __pyx_t_121, __pyx_t_3) < 0)) __PYX_ERR(0, 505, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_121); __pyx_t_121 = 0; /* "pysteps/motion/_vet.pyx":507 * _gradient_data[1, :] *= buffer * * for l in range(x_sectors): # schedule='dynamic', nogil=True): # <<<<<<<<<<<<<< * for m in range(y_sectors): * for i in range(i_min[l], i_max[l]): */ __pyx_t_25 = __pyx_v_x_sectors; __pyx_t_24 = __pyx_t_25; for (__pyx_t_23 = 0; __pyx_t_23 < __pyx_t_24; __pyx_t_23+=1) { __pyx_v_l = __pyx_t_23; /* "pysteps/motion/_vet.pyx":508 * * for l in range(x_sectors): # 
schedule='dynamic', nogil=True): * for m in range(y_sectors): # <<<<<<<<<<<<<< * for i in range(i_min[l], i_max[l]): * for j in range(j_min[m], j_max[m]): */ __pyx_t_27 = __pyx_v_y_sectors; __pyx_t_28 = __pyx_t_27; for (__pyx_t_29 = 0; __pyx_t_29 < __pyx_t_28; __pyx_t_29+=1) { __pyx_v_m = __pyx_t_29; /* "pysteps/motion/_vet.pyx":509 * for l in range(x_sectors): # schedule='dynamic', nogil=True): * for m in range(y_sectors): * for i in range(i_min[l], i_max[l]): # <<<<<<<<<<<<<< * for j in range(j_min[m], j_max[m]): * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ */ __pyx_t_122 = __pyx_v_l; __pyx_t_79 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_max.rcbuffer->pybuffer.buf, __pyx_t_122, __pyx_pybuffernd_i_max.diminfo[0].strides)); __pyx_t_123 = __pyx_v_l; __pyx_t_124 = __pyx_t_79; for (__pyx_t_125 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_min.rcbuffer->pybuffer.buf, __pyx_t_123, __pyx_pybuffernd_i_min.diminfo[0].strides)); __pyx_t_125 < __pyx_t_124; __pyx_t_125+=1) { __pyx_v_i = __pyx_t_125; /* "pysteps/motion/_vet.pyx":510 * for m in range(y_sectors): * for i in range(i_min[l], i_max[l]): * for j in range(j_min[m], j_max[m]): # <<<<<<<<<<<<<< * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ * (_gradient_data[0, i, j] */ __pyx_t_126 = __pyx_v_m; __pyx_t_127 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_max.rcbuffer->pybuffer.buf, __pyx_t_126, __pyx_pybuffernd_j_max.diminfo[0].strides)); __pyx_t_128 = __pyx_v_m; __pyx_t_129 = __pyx_t_127; for (__pyx_t_130 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_min.rcbuffer->pybuffer.buf, __pyx_t_128, __pyx_pybuffernd_j_min.diminfo[0].strides)); __pyx_t_130 < __pyx_t_129; __pyx_t_130+=1) { __pyx_v_j = __pyx_t_130; /* "pysteps/motion/_vet.pyx":511 * for i in range(i_min[l], i_max[l]): * for j in range(j_min[m], j_max[m]): * grad_residuals[0, l, m] = 
grad_residuals[0, l, m] + \ # <<<<<<<<<<<<<< * (_gradient_data[0, i, j] * * interp_coef[0, i, j]) */ __pyx_t_131 = 0; __pyx_t_132 = __pyx_v_l; __pyx_t_133 = __pyx_v_m; /* "pysteps/motion/_vet.pyx":512 * for j in range(j_min[m], j_max[m]): * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ * (_gradient_data[0, i, j] # <<<<<<<<<<<<<< * * interp_coef[0, i, j]) * */ __pyx_t_134 = 0; __pyx_t_135 = __pyx_v_i; __pyx_t_136 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":513 * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ * (_gradient_data[0, i, j] * * interp_coef[0, i, j]) # <<<<<<<<<<<<<< * * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ */ __pyx_t_137 = 0; __pyx_t_138 = __pyx_v_i; __pyx_t_139 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":511 * for i in range(i_min[l], i_max[l]): * for j in range(j_min[m], j_max[m]): * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ # <<<<<<<<<<<<<< * (_gradient_data[0, i, j] * * interp_coef[0, i, j]) */ __pyx_t_140 = 0; __pyx_t_141 = __pyx_v_l; __pyx_t_142 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_140, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_141, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_142, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) = ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_131, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_132, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_133, __pyx_pybuffernd_grad_residuals.diminfo[2].strides)) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_134, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_135, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_136, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * 
(*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_137, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_138, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_139, __pyx_pybuffernd_interp_coef.diminfo[2].strides)))); /* "pysteps/motion/_vet.pyx":515 * * interp_coef[0, i, j]) * * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ # <<<<<<<<<<<<<< * (_gradient_data[1, i, j] * * interp_coef[0, i, j]) */ __pyx_t_143 = 1; __pyx_t_144 = __pyx_v_l; __pyx_t_145 = __pyx_v_m; /* "pysteps/motion/_vet.pyx":516 * * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ * (_gradient_data[1, i, j] # <<<<<<<<<<<<<< * * interp_coef[0, i, j]) * */ __pyx_t_146 = 1; __pyx_t_147 = __pyx_v_i; __pyx_t_148 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":517 * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ * (_gradient_data[1, i, j] * * interp_coef[0, i, j]) # <<<<<<<<<<<<<< * * for m in range(1, y_sectors): */ __pyx_t_149 = 0; __pyx_t_150 = __pyx_v_i; __pyx_t_151 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":515 * * interp_coef[0, i, j]) * * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ # <<<<<<<<<<<<<< * (_gradient_data[1, i, j] * * interp_coef[0, i, j]) */ __pyx_t_152 = 1; __pyx_t_153 = __pyx_v_l; __pyx_t_154 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_152, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_153, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_154, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) = ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_143, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_144, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_145, __pyx_pybuffernd_grad_residuals.diminfo[2].strides)) + 
((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_146, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_147, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_148, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_149, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_150, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_151, __pyx_pybuffernd_interp_coef.diminfo[2].strides)))); } } } /* "pysteps/motion/_vet.pyx":519 * * interp_coef[0, i, j]) * * for m in range(1, y_sectors): # <<<<<<<<<<<<<< * for i in range(i_min[l], i_max[l]): * for j in range(j_min[m - 1], j_max[m - 1]): */ __pyx_t_27 = __pyx_v_y_sectors; __pyx_t_28 = __pyx_t_27; for (__pyx_t_29 = 1; __pyx_t_29 < __pyx_t_28; __pyx_t_29+=1) { __pyx_v_m = __pyx_t_29; /* "pysteps/motion/_vet.pyx":520 * * for m in range(1, y_sectors): * for i in range(i_min[l], i_max[l]): # <<<<<<<<<<<<<< * for j in range(j_min[m - 1], j_max[m - 1]): * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ */ __pyx_t_155 = __pyx_v_l; __pyx_t_79 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_max.rcbuffer->pybuffer.buf, __pyx_t_155, __pyx_pybuffernd_i_max.diminfo[0].strides)); __pyx_t_156 = __pyx_v_l; __pyx_t_124 = __pyx_t_79; for (__pyx_t_125 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_min.rcbuffer->pybuffer.buf, __pyx_t_156, __pyx_pybuffernd_i_min.diminfo[0].strides)); __pyx_t_125 < __pyx_t_124; __pyx_t_125+=1) { __pyx_v_i = __pyx_t_125; /* "pysteps/motion/_vet.pyx":521 * for m in range(1, y_sectors): * for i in range(i_min[l], i_max[l]): * for j in range(j_min[m - 1], j_max[m - 1]): # <<<<<<<<<<<<<< * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ * (_gradient_data[0, i, j] */ __pyx_t_157 = (__pyx_v_m - 
1); __pyx_t_127 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_max.rcbuffer->pybuffer.buf, __pyx_t_157, __pyx_pybuffernd_j_max.diminfo[0].strides)); __pyx_t_158 = (__pyx_v_m - 1); __pyx_t_129 = __pyx_t_127; for (__pyx_t_130 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_min.rcbuffer->pybuffer.buf, __pyx_t_158, __pyx_pybuffernd_j_min.diminfo[0].strides)); __pyx_t_130 < __pyx_t_129; __pyx_t_130+=1) { __pyx_v_j = __pyx_t_130; /* "pysteps/motion/_vet.pyx":522 * for i in range(i_min[l], i_max[l]): * for j in range(j_min[m - 1], j_max[m - 1]): * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ # <<<<<<<<<<<<<< * (_gradient_data[0, i, j] * * interp_coef[1, i, j]) */ __pyx_t_159 = 0; __pyx_t_160 = __pyx_v_l; __pyx_t_161 = __pyx_v_m; /* "pysteps/motion/_vet.pyx":523 * for j in range(j_min[m - 1], j_max[m - 1]): * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ * (_gradient_data[0, i, j] # <<<<<<<<<<<<<< * * interp_coef[1, i, j]) * */ __pyx_t_162 = 0; __pyx_t_163 = __pyx_v_i; __pyx_t_164 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":524 * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ * (_gradient_data[0, i, j] * * interp_coef[1, i, j]) # <<<<<<<<<<<<<< * * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ */ __pyx_t_165 = 1; __pyx_t_166 = __pyx_v_i; __pyx_t_167 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":522 * for i in range(i_min[l], i_max[l]): * for j in range(j_min[m - 1], j_max[m - 1]): * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ # <<<<<<<<<<<<<< * (_gradient_data[0, i, j] * * interp_coef[1, i, j]) */ __pyx_t_168 = 0; __pyx_t_169 = __pyx_v_l; __pyx_t_170 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_168, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_169, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_170, 
__pyx_pybuffernd_grad_residuals.diminfo[2].strides) = ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_159, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_160, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_161, __pyx_pybuffernd_grad_residuals.diminfo[2].strides)) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_162, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_163, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_164, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_165, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_166, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_167, __pyx_pybuffernd_interp_coef.diminfo[2].strides)))); /* "pysteps/motion/_vet.pyx":526 * * interp_coef[1, i, j]) * * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ # <<<<<<<<<<<<<< * (_gradient_data[1, i, j] # TODO: Check this line! * * interp_coef[1, i, j]) */ __pyx_t_171 = 1; __pyx_t_172 = __pyx_v_l; __pyx_t_173 = __pyx_v_m; /* "pysteps/motion/_vet.pyx":527 * * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ * (_gradient_data[1, i, j] # TODO: Check this line! # <<<<<<<<<<<<<< * * interp_coef[1, i, j]) * */ __pyx_t_174 = 1; __pyx_t_175 = __pyx_v_i; __pyx_t_176 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":528 * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ * (_gradient_data[1, i, j] # TODO: Check this line! 
* * interp_coef[1, i, j]) # <<<<<<<<<<<<<< * * for l in range(1, x_sectors): #, schedule='dynamic', nogil=True): */ __pyx_t_177 = 1; __pyx_t_178 = __pyx_v_i; __pyx_t_179 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":526 * * interp_coef[1, i, j]) * * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ # <<<<<<<<<<<<<< * (_gradient_data[1, i, j] # TODO: Check this line! * * interp_coef[1, i, j]) */ __pyx_t_180 = 1; __pyx_t_181 = __pyx_v_l; __pyx_t_182 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_180, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_181, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_182, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) = ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_171, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_172, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_173, __pyx_pybuffernd_grad_residuals.diminfo[2].strides)) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_174, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_175, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_176, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_177, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_178, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_179, __pyx_pybuffernd_interp_coef.diminfo[2].strides)))); } } } } /* "pysteps/motion/_vet.pyx":530 * * interp_coef[1, i, j]) * * for l in range(1, x_sectors): #, schedule='dynamic', nogil=True): # <<<<<<<<<<<<<< * for m in range(y_sectors): * for i in range(i_min[l - 1], i_max[l - 1]): */ __pyx_t_25 = __pyx_v_x_sectors; 
__pyx_t_24 = __pyx_t_25; for (__pyx_t_23 = 1; __pyx_t_23 < __pyx_t_24; __pyx_t_23+=1) { __pyx_v_l = __pyx_t_23; /* "pysteps/motion/_vet.pyx":531 * * for l in range(1, x_sectors): #, schedule='dynamic', nogil=True): * for m in range(y_sectors): # <<<<<<<<<<<<<< * for i in range(i_min[l - 1], i_max[l - 1]): * for j in range(j_min[m], j_max[m]): */ __pyx_t_27 = __pyx_v_y_sectors; __pyx_t_28 = __pyx_t_27; for (__pyx_t_29 = 0; __pyx_t_29 < __pyx_t_28; __pyx_t_29+=1) { __pyx_v_m = __pyx_t_29; /* "pysteps/motion/_vet.pyx":532 * for l in range(1, x_sectors): #, schedule='dynamic', nogil=True): * for m in range(y_sectors): * for i in range(i_min[l - 1], i_max[l - 1]): # <<<<<<<<<<<<<< * for j in range(j_min[m], j_max[m]): * grad_residuals[0, l, m] += (_gradient_data[0, i, j] */ __pyx_t_183 = (__pyx_v_l - 1); __pyx_t_79 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_max.rcbuffer->pybuffer.buf, __pyx_t_183, __pyx_pybuffernd_i_max.diminfo[0].strides)); __pyx_t_184 = (__pyx_v_l - 1); __pyx_t_124 = __pyx_t_79; for (__pyx_t_125 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_min.rcbuffer->pybuffer.buf, __pyx_t_184, __pyx_pybuffernd_i_min.diminfo[0].strides)); __pyx_t_125 < __pyx_t_124; __pyx_t_125+=1) { __pyx_v_i = __pyx_t_125; /* "pysteps/motion/_vet.pyx":533 * for m in range(y_sectors): * for i in range(i_min[l - 1], i_max[l - 1]): * for j in range(j_min[m], j_max[m]): # <<<<<<<<<<<<<< * grad_residuals[0, l, m] += (_gradient_data[0, i, j] * * interp_coef[2, i, j]) */ __pyx_t_185 = __pyx_v_m; __pyx_t_127 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_max.rcbuffer->pybuffer.buf, __pyx_t_185, __pyx_pybuffernd_j_max.diminfo[0].strides)); __pyx_t_186 = __pyx_v_m; __pyx_t_129 = __pyx_t_127; for (__pyx_t_130 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_min.rcbuffer->pybuffer.buf, __pyx_t_186, __pyx_pybuffernd_j_min.diminfo[0].strides)); 
__pyx_t_130 < __pyx_t_129; __pyx_t_130+=1) { __pyx_v_j = __pyx_t_130; /* "pysteps/motion/_vet.pyx":534 * for i in range(i_min[l - 1], i_max[l - 1]): * for j in range(j_min[m], j_max[m]): * grad_residuals[0, l, m] += (_gradient_data[0, i, j] # <<<<<<<<<<<<<< * * interp_coef[2, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, j] */ __pyx_t_187 = 0; __pyx_t_188 = __pyx_v_i; __pyx_t_189 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":535 * for j in range(j_min[m], j_max[m]): * grad_residuals[0, l, m] += (_gradient_data[0, i, j] * * interp_coef[2, i, j]) # <<<<<<<<<<<<<< * grad_residuals[1, l, m] += (_gradient_data[1, i, j] * * interp_coef[2, i, j]) */ __pyx_t_190 = 2; __pyx_t_191 = __pyx_v_i; __pyx_t_192 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":534 * for i in range(i_min[l - 1], i_max[l - 1]): * for j in range(j_min[m], j_max[m]): * grad_residuals[0, l, m] += (_gradient_data[0, i, j] # <<<<<<<<<<<<<< * * interp_coef[2, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, j] */ __pyx_t_193 = 0; __pyx_t_194 = __pyx_v_l; __pyx_t_195 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_193, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_194, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_195, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) += ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_187, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_188, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_189, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_190, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_191, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_192, 
__pyx_pybuffernd_interp_coef.diminfo[2].strides))); /* "pysteps/motion/_vet.pyx":536 * grad_residuals[0, l, m] += (_gradient_data[0, i, j] * * interp_coef[2, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, j] # <<<<<<<<<<<<<< * * interp_coef[2, i, j]) * */ __pyx_t_196 = 1; __pyx_t_197 = __pyx_v_i; __pyx_t_198 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":537 * * interp_coef[2, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, j] * * interp_coef[2, i, j]) # <<<<<<<<<<<<<< * * for m in range(1, y_sectors): */ __pyx_t_199 = 2; __pyx_t_200 = __pyx_v_i; __pyx_t_201 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":536 * grad_residuals[0, l, m] += (_gradient_data[0, i, j] * * interp_coef[2, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, j] # <<<<<<<<<<<<<< * * interp_coef[2, i, j]) * */ __pyx_t_202 = 1; __pyx_t_203 = __pyx_v_l; __pyx_t_204 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_202, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_203, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_204, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) += ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_196, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_197, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_198, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_199, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_200, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_201, __pyx_pybuffernd_interp_coef.diminfo[2].strides))); } } } /* "pysteps/motion/_vet.pyx":539 * * interp_coef[2, i, j]) * * for m in range(1, y_sectors): # <<<<<<<<<<<<<< * for i in range(i_min[l - 1], i_max[l - 1]): * for j in 
range(j_min[m - 1], j_max[m - 1]): */ __pyx_t_27 = __pyx_v_y_sectors; __pyx_t_28 = __pyx_t_27; for (__pyx_t_29 = 1; __pyx_t_29 < __pyx_t_28; __pyx_t_29+=1) { __pyx_v_m = __pyx_t_29; /* "pysteps/motion/_vet.pyx":540 * * for m in range(1, y_sectors): * for i in range(i_min[l - 1], i_max[l - 1]): # <<<<<<<<<<<<<< * for j in range(j_min[m - 1], j_max[m - 1]): * grad_residuals[0, l, m] += (_gradient_data[0, i, j] */ __pyx_t_205 = (__pyx_v_l - 1); __pyx_t_79 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_max.rcbuffer->pybuffer.buf, __pyx_t_205, __pyx_pybuffernd_i_max.diminfo[0].strides)); __pyx_t_206 = (__pyx_v_l - 1); __pyx_t_124 = __pyx_t_79; for (__pyx_t_125 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_min.rcbuffer->pybuffer.buf, __pyx_t_206, __pyx_pybuffernd_i_min.diminfo[0].strides)); __pyx_t_125 < __pyx_t_124; __pyx_t_125+=1) { __pyx_v_i = __pyx_t_125; /* "pysteps/motion/_vet.pyx":541 * for m in range(1, y_sectors): * for i in range(i_min[l - 1], i_max[l - 1]): * for j in range(j_min[m - 1], j_max[m - 1]): # <<<<<<<<<<<<<< * grad_residuals[0, l, m] += (_gradient_data[0, i, j] * * interp_coef[3, i, j]) */ __pyx_t_207 = (__pyx_v_m - 1); __pyx_t_127 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_max.rcbuffer->pybuffer.buf, __pyx_t_207, __pyx_pybuffernd_j_max.diminfo[0].strides)); __pyx_t_208 = (__pyx_v_m - 1); __pyx_t_129 = __pyx_t_127; for (__pyx_t_130 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_min.rcbuffer->pybuffer.buf, __pyx_t_208, __pyx_pybuffernd_j_min.diminfo[0].strides)); __pyx_t_130 < __pyx_t_129; __pyx_t_130+=1) { __pyx_v_j = __pyx_t_130; /* "pysteps/motion/_vet.pyx":542 * for i in range(i_min[l - 1], i_max[l - 1]): * for j in range(j_min[m - 1], j_max[m - 1]): * grad_residuals[0, l, m] += (_gradient_data[0, i, j] # <<<<<<<<<<<<<< * * interp_coef[3, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, 
j] */ __pyx_t_209 = 0; __pyx_t_210 = __pyx_v_i; __pyx_t_211 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":543 * for j in range(j_min[m - 1], j_max[m - 1]): * grad_residuals[0, l, m] += (_gradient_data[0, i, j] * * interp_coef[3, i, j]) # <<<<<<<<<<<<<< * grad_residuals[1, l, m] += (_gradient_data[1, i, j] * * interp_coef[3, i, j]) */ __pyx_t_212 = 3; __pyx_t_213 = __pyx_v_i; __pyx_t_214 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":542 * for i in range(i_min[l - 1], i_max[l - 1]): * for j in range(j_min[m - 1], j_max[m - 1]): * grad_residuals[0, l, m] += (_gradient_data[0, i, j] # <<<<<<<<<<<<<< * * interp_coef[3, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, j] */ __pyx_t_215 = 0; __pyx_t_216 = __pyx_v_l; __pyx_t_217 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_215, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_216, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_217, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) += ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_209, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_210, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_211, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_212, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_213, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_214, __pyx_pybuffernd_interp_coef.diminfo[2].strides))); /* "pysteps/motion/_vet.pyx":544 * grad_residuals[0, l, m] += (_gradient_data[0, i, j] * * interp_coef[3, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, j] # <<<<<<<<<<<<<< * * interp_coef[3, i, j]) * */ __pyx_t_218 = 1; __pyx_t_219 = __pyx_v_i; __pyx_t_220 = __pyx_v_j; /* 
"pysteps/motion/_vet.pyx":545 * * interp_coef[3, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, j] * * interp_coef[3, i, j]) # <<<<<<<<<<<<<< * * */ __pyx_t_221 = 3; __pyx_t_222 = __pyx_v_i; __pyx_t_223 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":544 * grad_residuals[0, l, m] += (_gradient_data[0, i, j] * * interp_coef[3, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, j] # <<<<<<<<<<<<<< * * interp_coef[3, i, j]) * */ __pyx_t_224 = 1; __pyx_t_225 = __pyx_v_l; __pyx_t_226 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_224, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_225, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_226, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) += ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_218, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_219, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_220, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_221, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_222, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_223, __pyx_pybuffernd_interp_coef.diminfo[2].strides))); } } } } /* "pysteps/motion/_vet.pyx":492 * * # Compute residual part of the cost function * if gradient: # <<<<<<<<<<<<<< * * morphed_image, morph_mask, _gradient_data = _warp(template_image, */ goto __pyx_L26; } /* "pysteps/motion/_vet.pyx":550 * else: * * morphed_image, morph_mask = _warp(template_image, # <<<<<<<<<<<<<< * mask, * displacement, */ /*else*/ { __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_warp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "pysteps/motion/_vet.pyx":552 * 
morphed_image, morph_mask = _warp(template_image, * mask, * displacement, # <<<<<<<<<<<<<< * gradient=False) * morph_mask[mask > 0] = 1 */ __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_template_image)); __Pyx_GIVEREF(((PyObject *)__pyx_v_template_image)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_template_image)); __Pyx_INCREF(((PyObject *)__pyx_v_mask)); __Pyx_GIVEREF(((PyObject *)__pyx_v_mask)); PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_mask)); __Pyx_INCREF(((PyObject *)__pyx_v_displacement)); __Pyx_GIVEREF(((PyObject *)__pyx_v_displacement)); PyTuple_SET_ITEM(__pyx_t_2, 2, ((PyObject *)__pyx_v_displacement)); /* "pysteps/motion/_vet.pyx":553 * mask, * displacement, * gradient=False) # <<<<<<<<<<<<<< * morph_mask[mask > 0] = 1 * residuals = np.sum((morphed_image - input_image)[morph_mask == 0] ** 2) */ __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 553, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_gradient, Py_False) < 0) __PYX_ERR(0, 553, __pyx_L1_error) /* "pysteps/motion/_vet.pyx":550 * else: * * morphed_image, morph_mask = _warp(template_image, # <<<<<<<<<<<<<< * mask, * displacement, */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if ((likely(PyTuple_CheckExact(__pyx_t_5))) || (PyList_CheckExact(__pyx_t_5))) { PyObject* sequence = __pyx_t_5; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 550, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if 
(likely(PyTuple_CheckExact(sequence))) { __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_2 = PyTuple_GET_ITEM(sequence, 1); } else { __pyx_t_4 = PyList_GET_ITEM(sequence, 0); __pyx_t_2 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_2); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); #endif __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else { Py_ssize_t index = -1; __pyx_t_3 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_110 = Py_TYPE(__pyx_t_3)->tp_iternext; index = 0; __pyx_t_4 = __pyx_t_110(__pyx_t_3); if (unlikely(!__pyx_t_4)) goto __pyx_L57_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); index = 1; __pyx_t_2 = __pyx_t_110(__pyx_t_3); if (unlikely(!__pyx_t_2)) goto __pyx_L57_unpacking_failed; __Pyx_GOTREF(__pyx_t_2); if (__Pyx_IternextUnpackEndCheck(__pyx_t_110(__pyx_t_3), 2) < 0) __PYX_ERR(0, 550, __pyx_L1_error) __pyx_t_110 = NULL; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L58_unpacking_done; __pyx_L57_unpacking_failed:; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_110 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 550, __pyx_L1_error) __pyx_L58_unpacking_done:; } if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 550, __pyx_L1_error) if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 550, __pyx_L1_error) __pyx_t_118 = ((PyArrayObject *)__pyx_t_4); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer); __pyx_t_9 = 
__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer, (PyObject*)__pyx_t_118, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer, (PyObject*)__pyx_v_morphed_image, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10); } __pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0; } __pyx_pybuffernd_morphed_image.diminfo[0].strides = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_morphed_image.diminfo[0].shape = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_morphed_image.diminfo[1].strides = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_morphed_image.diminfo[1].shape = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 550, __pyx_L1_error) } __pyx_t_118 = 0; __pyx_v_morphed_image = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0; __pyx_t_119 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer, (PyObject*)__pyx_t_119, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer, (PyObject*)__pyx_v_morph_mask, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| 
PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0; } __pyx_pybuffernd_morph_mask.diminfo[0].strides = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_morph_mask.diminfo[0].shape = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_morph_mask.diminfo[1].strides = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_morph_mask.diminfo[1].shape = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 550, __pyx_L1_error) } __pyx_t_119 = 0; __pyx_v_morph_mask = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "pysteps/motion/_vet.pyx":554 * displacement, * gradient=False) * morph_mask[mask > 0] = 1 # <<<<<<<<<<<<<< * residuals = np.sum((morphed_image - input_image)[morph_mask == 0] ** 2) * */ __pyx_t_5 = PyObject_RichCompare(((PyObject *)__pyx_v_mask), __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 554, __pyx_L1_error) if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_morph_mask), __pyx_t_5, __pyx_int_1) < 0)) __PYX_ERR(0, 554, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "pysteps/motion/_vet.pyx":555 * gradient=False) * morph_mask[mask > 0] = 1 * residuals = np.sum((morphed_image - input_image)[morph_mask == 0] ** 2) # <<<<<<<<<<<<<< * * # Compute smoothness constraint part of the cost function */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 555, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_sum); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 555, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Subtract(((PyObject *)__pyx_v_morphed_image), ((PyObject 
*)__pyx_v_input_image)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 555, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_RichCompare(((PyObject *)__pyx_v_morph_mask), __pyx_int_0, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 555, __pyx_L1_error) __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 555, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Power(__pyx_t_6, __pyx_int_2, Py_None); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 555, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_5 = (__pyx_t_6) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_6, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 555, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_227 = __pyx_PyFloat_AsDouble(__pyx_t_5); if (unlikely((__pyx_t_227 == ((npy_float64)-1)) && PyErr_Occurred())) __PYX_ERR(0, 555, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_residuals = __pyx_t_227; } __pyx_L26:; /* "pysteps/motion/_vet.pyx":558 * * # Compute smoothness constraint part of the cost function * cdef float64 smoothness_penalty = 0 # <<<<<<<<<<<<<< * * cdef float64 df_dx2 = 0 */ __pyx_v_smoothness_penalty = 0.0; /* "pysteps/motion/_vet.pyx":560 * cdef float64 smoothness_penalty = 0 * * cdef float64 df_dx2 = 0 # <<<<<<<<<<<<<< * cdef float64 df_dxdy = 0 * cdef float64 df_dy2 = 0 */ __pyx_v_df_dx2 = 0.0; /* "pysteps/motion/_vet.pyx":561 * * cdef float64 df_dx2 = 0 * cdef float64 df_dxdy = 0 # <<<<<<<<<<<<<< * cdef float64 df_dy2 = 0 * */ __pyx_v_df_dxdy = 0.0; /* "pysteps/motion/_vet.pyx":562 * cdef float64 df_dx2 = 0 * cdef float64 df_dxdy = 0 * cdef float64 df_dy2 = 0 # <<<<<<<<<<<<<< * * cdef float64 inloop_smoothness_penalty */ __pyx_v_df_dy2 = 0.0; /* "pysteps/motion/_vet.pyx":566 * cdef float64 inloop_smoothness_penalty * * if smooth_gain > 0.: # <<<<<<<<<<<<<< * * for axis in range(2): #, schedule='dynamic', nogil=True): */ __pyx_t_1 = ((__pyx_v_smooth_gain > 0.) 
!= 0); if (__pyx_t_1) { /* "pysteps/motion/_vet.pyx":568 * if smooth_gain > 0.: * * for axis in range(2): #, schedule='dynamic', nogil=True): # <<<<<<<<<<<<<< * * inloop_smoothness_penalty = 0 */ for (__pyx_t_25 = 0; __pyx_t_25 < 2; __pyx_t_25+=1) { __pyx_v_axis = __pyx_t_25; /* "pysteps/motion/_vet.pyx":570 * for axis in range(2): #, schedule='dynamic', nogil=True): * * inloop_smoothness_penalty = 0 # <<<<<<<<<<<<<< * * for l in range(1, x_sectors - 1): */ __pyx_v_inloop_smoothness_penalty = 0.0; /* "pysteps/motion/_vet.pyx":572 * inloop_smoothness_penalty = 0 * * for l in range(1, x_sectors - 1): # <<<<<<<<<<<<<< * * for m in range(1, y_sectors - 1): */ __pyx_t_228 = (__pyx_v_x_sectors - 1); __pyx_t_229 = __pyx_t_228; for (__pyx_t_24 = 1; __pyx_t_24 < __pyx_t_229; __pyx_t_24+=1) { __pyx_v_l = __pyx_t_24; /* "pysteps/motion/_vet.pyx":574 * for l in range(1, x_sectors - 1): * * for m in range(1, y_sectors - 1): # <<<<<<<<<<<<<< * df_dx2 = (sector_displacement[axis, l + 1, m] * - 2 * sector_displacement[axis, l, m] */ __pyx_t_230 = (__pyx_v_y_sectors - 1); __pyx_t_231 = __pyx_t_230; for (__pyx_t_23 = 1; __pyx_t_23 < __pyx_t_231; __pyx_t_23+=1) { __pyx_v_m = __pyx_t_23; /* "pysteps/motion/_vet.pyx":575 * * for m in range(1, y_sectors - 1): * df_dx2 = (sector_displacement[axis, l + 1, m] # <<<<<<<<<<<<<< * - 2 * sector_displacement[axis, l, m] * + sector_displacement[axis, l - 1, m]) */ __pyx_t_232 = __pyx_v_axis; __pyx_t_233 = (__pyx_v_l + 1); __pyx_t_234 = __pyx_v_m; /* "pysteps/motion/_vet.pyx":576 * for m in range(1, y_sectors - 1): * df_dx2 = (sector_displacement[axis, l + 1, m] * - 2 * sector_displacement[axis, l, m] # <<<<<<<<<<<<<< * + sector_displacement[axis, l - 1, m]) * */ __pyx_t_235 = __pyx_v_axis; __pyx_t_236 = __pyx_v_l; __pyx_t_237 = __pyx_v_m; /* "pysteps/motion/_vet.pyx":577 * df_dx2 = (sector_displacement[axis, l + 1, m] * - 2 * sector_displacement[axis, l, m] * + sector_displacement[axis, l - 1, m]) # <<<<<<<<<<<<<< * * df_dx2 = df_dx2 / 
(x_sector_size * x_sector_size) */ __pyx_t_238 = __pyx_v_axis; __pyx_t_239 = (__pyx_v_l - 1); __pyx_t_240 = __pyx_v_m; __pyx_v_df_dx2 = (((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_232, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_233, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_234, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) - (2.0 * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_235, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_236, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_237, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)))) + (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_238, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_239, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_240, __pyx_pybuffernd_sector_displacement.diminfo[2].strides))); /* "pysteps/motion/_vet.pyx":579 * + sector_displacement[axis, l - 1, m]) * * df_dx2 = df_dx2 / (x_sector_size * x_sector_size) # <<<<<<<<<<<<<< * * df_dy2 = (sector_displacement[axis, l, m + 1] */ __pyx_v_df_dx2 = (__pyx_v_df_dx2 / ((__pyx_t_7pysteps_6motion_4_vet_float64)(__pyx_v_x_sector_size * __pyx_v_x_sector_size))); /* "pysteps/motion/_vet.pyx":581 * df_dx2 = df_dx2 / (x_sector_size * x_sector_size) * * df_dy2 = (sector_displacement[axis, l, m + 1] # <<<<<<<<<<<<<< * - 2 * sector_displacement[axis, l, m] * + sector_displacement[axis, l, m - 1]) */ __pyx_t_241 = __pyx_v_axis; __pyx_t_242 = __pyx_v_l; __pyx_t_243 = (__pyx_v_m + 1); /* "pysteps/motion/_vet.pyx":582 * * df_dy2 = (sector_displacement[axis, l, m + 1] * - 2 * sector_displacement[axis, l, m] # <<<<<<<<<<<<<< * + sector_displacement[axis, l, m - 1]) * */ 
__pyx_t_244 = __pyx_v_axis; __pyx_t_245 = __pyx_v_l; __pyx_t_246 = __pyx_v_m; /* "pysteps/motion/_vet.pyx":583 * df_dy2 = (sector_displacement[axis, l, m + 1] * - 2 * sector_displacement[axis, l, m] * + sector_displacement[axis, l, m - 1]) # <<<<<<<<<<<<<< * * df_dy2 = df_dy2 / (y_sector_size * y_sector_size) */ __pyx_t_247 = __pyx_v_axis; __pyx_t_248 = __pyx_v_l; __pyx_t_249 = (__pyx_v_m - 1); __pyx_v_df_dy2 = (((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_241, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_242, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_243, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) - (2.0 * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_244, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_245, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_246, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)))) + (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_247, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_248, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_249, __pyx_pybuffernd_sector_displacement.diminfo[2].strides))); /* "pysteps/motion/_vet.pyx":585 * + sector_displacement[axis, l, m - 1]) * * df_dy2 = df_dy2 / (y_sector_size * y_sector_size) # <<<<<<<<<<<<<< * * df_dxdy = (sector_displacement[axis, l + 1, m + 1] */ __pyx_v_df_dy2 = (__pyx_v_df_dy2 / ((__pyx_t_7pysteps_6motion_4_vet_float64)(__pyx_v_y_sector_size * __pyx_v_y_sector_size))); /* "pysteps/motion/_vet.pyx":587 * df_dy2 = df_dy2 / (y_sector_size * y_sector_size) * * df_dxdy = (sector_displacement[axis, l + 1, m + 1] # <<<<<<<<<<<<<< * - sector_displacement[axis, l + 1, m - 1] * - 
sector_displacement[axis, l - 1, m + 1] */ __pyx_t_250 = __pyx_v_axis; __pyx_t_251 = (__pyx_v_l + 1); __pyx_t_252 = (__pyx_v_m + 1); /* "pysteps/motion/_vet.pyx":588 * * df_dxdy = (sector_displacement[axis, l + 1, m + 1] * - sector_displacement[axis, l + 1, m - 1] # <<<<<<<<<<<<<< * - sector_displacement[axis, l - 1, m + 1] * + sector_displacement[axis, l - 1, m - 1]) */ __pyx_t_253 = __pyx_v_axis; __pyx_t_254 = (__pyx_v_l + 1); __pyx_t_255 = (__pyx_v_m - 1); /* "pysteps/motion/_vet.pyx":589 * df_dxdy = (sector_displacement[axis, l + 1, m + 1] * - sector_displacement[axis, l + 1, m - 1] * - sector_displacement[axis, l - 1, m + 1] # <<<<<<<<<<<<<< * + sector_displacement[axis, l - 1, m - 1]) * df_dxdy = df_dxdy / (4 * x_sector_size * y_sector_size) */ __pyx_t_256 = __pyx_v_axis; __pyx_t_257 = (__pyx_v_l - 1); __pyx_t_258 = (__pyx_v_m + 1); /* "pysteps/motion/_vet.pyx":590 * - sector_displacement[axis, l + 1, m - 1] * - sector_displacement[axis, l - 1, m + 1] * + sector_displacement[axis, l - 1, m - 1]) # <<<<<<<<<<<<<< * df_dxdy = df_dxdy / (4 * x_sector_size * y_sector_size) * */ __pyx_t_259 = __pyx_v_axis; __pyx_t_260 = (__pyx_v_l - 1); __pyx_t_261 = (__pyx_v_m - 1); __pyx_v_df_dxdy = ((((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_250, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_251, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_252, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) - (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_253, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_254, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_255, __pyx_pybuffernd_sector_displacement.diminfo[2].strides))) - (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, 
__pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_256, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_257, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_258, __pyx_pybuffernd_sector_displacement.diminfo[2].strides))) + (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_259, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_260, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_261, __pyx_pybuffernd_sector_displacement.diminfo[2].strides))); /* "pysteps/motion/_vet.pyx":591 * - sector_displacement[axis, l - 1, m + 1] * + sector_displacement[axis, l - 1, m - 1]) * df_dxdy = df_dxdy / (4 * x_sector_size * y_sector_size) # <<<<<<<<<<<<<< * * if gradient: */ __pyx_v_df_dxdy = (__pyx_v_df_dxdy / ((__pyx_t_7pysteps_6motion_4_vet_float64)((4 * __pyx_v_x_sector_size) * __pyx_v_y_sector_size))); /* "pysteps/motion/_vet.pyx":593 * df_dxdy = df_dxdy / (4 * x_sector_size * y_sector_size) * * if gradient: # <<<<<<<<<<<<<< * grad_smooth[axis, l, m] -= 2 * df_dx2 * grad_smooth[axis, l + 1, m] += df_dx2 */ __pyx_t_1 = (__pyx_v_gradient != 0); if (__pyx_t_1) { /* "pysteps/motion/_vet.pyx":594 * * if gradient: * grad_smooth[axis, l, m] -= 2 * df_dx2 # <<<<<<<<<<<<<< * grad_smooth[axis, l + 1, m] += df_dx2 * grad_smooth[axis, l - 1, m] += df_dx2 */ __pyx_t_262 = __pyx_v_axis; __pyx_t_263 = __pyx_v_l; __pyx_t_264 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_262, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_263, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_264, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) -= (2.0 * __pyx_v_df_dx2); /* "pysteps/motion/_vet.pyx":595 * if gradient: * grad_smooth[axis, l, m] -= 2 * df_dx2 * grad_smooth[axis, l + 1, m] += df_dx2 # <<<<<<<<<<<<<< * grad_smooth[axis, l - 1, 
m] += df_dx2 * */ __pyx_t_265 = __pyx_v_axis; __pyx_t_266 = (__pyx_v_l + 1); __pyx_t_267 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_265, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_266, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_267, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) += __pyx_v_df_dx2; /* "pysteps/motion/_vet.pyx":596 * grad_smooth[axis, l, m] -= 2 * df_dx2 * grad_smooth[axis, l + 1, m] += df_dx2 * grad_smooth[axis, l - 1, m] += df_dx2 # <<<<<<<<<<<<<< * * grad_smooth[axis, l, m] -= 2 * df_dy2 */ __pyx_t_268 = __pyx_v_axis; __pyx_t_269 = (__pyx_v_l - 1); __pyx_t_270 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_268, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_269, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_270, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) += __pyx_v_df_dx2; /* "pysteps/motion/_vet.pyx":598 * grad_smooth[axis, l - 1, m] += df_dx2 * * grad_smooth[axis, l, m] -= 2 * df_dy2 # <<<<<<<<<<<<<< * grad_smooth[axis, l, m - 1] += df_dy2 * grad_smooth[axis, l, m + 1] += df_dy2 */ __pyx_t_271 = __pyx_v_axis; __pyx_t_272 = __pyx_v_l; __pyx_t_273 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_271, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_272, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_273, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) -= (2.0 * __pyx_v_df_dy2); /* "pysteps/motion/_vet.pyx":599 * * grad_smooth[axis, l, m] -= 2 * df_dy2 * grad_smooth[axis, l, m - 1] += df_dy2 # <<<<<<<<<<<<<< * grad_smooth[axis, l, m + 1] += df_dy2 * */ __pyx_t_274 = __pyx_v_axis; __pyx_t_275 = __pyx_v_l; __pyx_t_276 = (__pyx_v_m - 1); *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, 
__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_274, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_275, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_276, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) += __pyx_v_df_dy2; /* "pysteps/motion/_vet.pyx":600 * grad_smooth[axis, l, m] -= 2 * df_dy2 * grad_smooth[axis, l, m - 1] += df_dy2 * grad_smooth[axis, l, m + 1] += df_dy2 # <<<<<<<<<<<<<< * * grad_smooth[axis, l - 1, m - 1] += df_dxdy */ __pyx_t_277 = __pyx_v_axis; __pyx_t_278 = __pyx_v_l; __pyx_t_279 = (__pyx_v_m + 1); *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_277, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_278, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_279, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) += __pyx_v_df_dy2; /* "pysteps/motion/_vet.pyx":602 * grad_smooth[axis, l, m + 1] += df_dy2 * * grad_smooth[axis, l - 1, m - 1] += df_dxdy # <<<<<<<<<<<<<< * grad_smooth[axis, l - 1, m + 1] -= df_dxdy * grad_smooth[axis, l + 1, m - 1] -= df_dxdy */ __pyx_t_280 = __pyx_v_axis; __pyx_t_281 = (__pyx_v_l - 1); __pyx_t_282 = (__pyx_v_m - 1); *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_280, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_281, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_282, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) += __pyx_v_df_dxdy; /* "pysteps/motion/_vet.pyx":603 * * grad_smooth[axis, l - 1, m - 1] += df_dxdy * grad_smooth[axis, l - 1, m + 1] -= df_dxdy # <<<<<<<<<<<<<< * grad_smooth[axis, l + 1, m - 1] -= df_dxdy * grad_smooth[axis, l + 1, m + 1] += df_dxdy */ __pyx_t_283 = __pyx_v_axis; __pyx_t_284 = (__pyx_v_l - 1); __pyx_t_285 = (__pyx_v_m + 1); *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_283, 
__pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_284, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_285, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) -= __pyx_v_df_dxdy; /* "pysteps/motion/_vet.pyx":604 * grad_smooth[axis, l - 1, m - 1] += df_dxdy * grad_smooth[axis, l - 1, m + 1] -= df_dxdy * grad_smooth[axis, l + 1, m - 1] -= df_dxdy # <<<<<<<<<<<<<< * grad_smooth[axis, l + 1, m + 1] += df_dxdy * */ __pyx_t_286 = __pyx_v_axis; __pyx_t_287 = (__pyx_v_l + 1); __pyx_t_288 = (__pyx_v_m - 1); *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_286, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_287, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_288, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) -= __pyx_v_df_dxdy; /* "pysteps/motion/_vet.pyx":605 * grad_smooth[axis, l - 1, m + 1] -= df_dxdy * grad_smooth[axis, l + 1, m - 1] -= df_dxdy * grad_smooth[axis, l + 1, m + 1] += df_dxdy # <<<<<<<<<<<<<< * * inloop_smoothness_penalty = (df_dx2 * df_dx2 */ __pyx_t_289 = __pyx_v_axis; __pyx_t_290 = (__pyx_v_l + 1); __pyx_t_291 = (__pyx_v_m + 1); *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_289, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_290, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_291, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) += __pyx_v_df_dxdy; /* "pysteps/motion/_vet.pyx":593 * df_dxdy = df_dxdy / (4 * x_sector_size * y_sector_size) * * if gradient: # <<<<<<<<<<<<<< * grad_smooth[axis, l, m] -= 2 * df_dx2 * grad_smooth[axis, l + 1, m] += df_dx2 */ } /* "pysteps/motion/_vet.pyx":609 * inloop_smoothness_penalty = (df_dx2 * df_dx2 * + 2 * df_dxdy * df_dxdy * + df_dy2 * df_dy2) # <<<<<<<<<<<<<< * * smoothness_penalty += inloop_smoothness_penalty */ __pyx_v_inloop_smoothness_penalty = (((__pyx_v_df_dx2 * __pyx_v_df_dx2) + ((2.0 * 
__pyx_v_df_dxdy) * __pyx_v_df_dxdy)) + (__pyx_v_df_dy2 * __pyx_v_df_dy2)); /* "pysteps/motion/_vet.pyx":611 * + df_dy2 * df_dy2) * * smoothness_penalty += inloop_smoothness_penalty # <<<<<<<<<<<<<< * * smoothness_penalty *= smooth_gain #* x_sector_size * y_sector_size */ __pyx_v_smoothness_penalty = (__pyx_v_smoothness_penalty + __pyx_v_inloop_smoothness_penalty); } } } /* "pysteps/motion/_vet.pyx":613 * smoothness_penalty += inloop_smoothness_penalty * * smoothness_penalty *= smooth_gain #* x_sector_size * y_sector_size # <<<<<<<<<<<<<< * * if gradient: */ __pyx_v_smoothness_penalty = (__pyx_v_smoothness_penalty * __pyx_v_smooth_gain); /* "pysteps/motion/_vet.pyx":566 * cdef float64 inloop_smoothness_penalty * * if smooth_gain > 0.: # <<<<<<<<<<<<<< * * for axis in range(2): #, schedule='dynamic', nogil=True): */ } /* "pysteps/motion/_vet.pyx":615 * smoothness_penalty *= smooth_gain #* x_sector_size * y_sector_size * * if gradient: # <<<<<<<<<<<<<< * grad_smooth *= 2 * smooth_gain #* x_sector_size * y_sector_size * */ __pyx_t_1 = (__pyx_v_gradient != 0); if (__pyx_t_1) { /* "pysteps/motion/_vet.pyx":616 * * if gradient: * grad_smooth *= 2 * smooth_gain #* x_sector_size * y_sector_size # <<<<<<<<<<<<<< * * return grad_residuals + grad_smooth */ __pyx_t_5 = PyFloat_FromDouble((2.0 * __pyx_v_smooth_gain)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyNumber_InPlaceMultiply(((PyObject *)__pyx_v_grad_smooth), __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 616, __pyx_L1_error) __pyx_t_116 = ((PyArrayObject *)__pyx_t_4); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer); __pyx_t_9 = 
__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer, (PyObject*)__pyx_t_116, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer, (PyObject*)__pyx_v_grad_smooth, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10); } __pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0; } __pyx_pybuffernd_grad_smooth.diminfo[0].strides = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_grad_smooth.diminfo[0].shape = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_grad_smooth.diminfo[1].strides = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_grad_smooth.diminfo[1].shape = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_grad_smooth.diminfo[2].strides = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_grad_smooth.diminfo[2].shape = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.shape[2]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 616, __pyx_L1_error) } __pyx_t_116 = 0; __Pyx_DECREF_SET(__pyx_v_grad_smooth, ((PyArrayObject *)__pyx_t_4)); __pyx_t_4 = 0; /* "pysteps/motion/_vet.pyx":618 * grad_smooth *= 2 * smooth_gain #* x_sector_size * y_sector_size * * return grad_residuals + grad_smooth # <<<<<<<<<<<<<< * else: * return residuals, smoothness_penalty */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = PyNumber_Add(((PyObject *)__pyx_v_grad_residuals), ((PyObject *)__pyx_v_grad_smooth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 618, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_4); __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "pysteps/motion/_vet.pyx":615 * smoothness_penalty *= smooth_gain #* x_sector_size * y_sector_size * * if gradient: # <<<<<<<<<<<<<< * grad_smooth *= 2 * smooth_gain #* x_sector_size * y_sector_size * */ } /* "pysteps/motion/_vet.pyx":620 * return grad_residuals + grad_smooth * else: * return residuals, smoothness_penalty # <<<<<<<<<<<<<< */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_residuals); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 620, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyFloat_FromDouble(__pyx_v_smoothness_penalty); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 620, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 620, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "pysteps/motion/_vet.pyx":240 * @cython.nonecheck(False) * @cython.cdivision(True) * def _cost_function(np.ndarray[float64, ndim=3] sector_displacement, # <<<<<<<<<<<<<< * np.ndarray[float64, ndim=2] template_image, * np.ndarray[float64, ndim=2] input_image, */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_109); __Pyx_XDECREF(__pyx_t_121); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__gradient_data.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_buffer.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_displacement.rcbuffer->pybuffer); 
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_i_max.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_i_min.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_input_image.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_interp_coef.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_j_max.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_j_min.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_l_i.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_m_j.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_template_image.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x_guess.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y_guess.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("pysteps.motion._vet._cost_function", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__gradient_data.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_buffer.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_displacement.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer); 
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_i_max.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_i_min.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_input_image.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_interp_coef.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_j_max.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_j_min.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_l_i.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_m_j.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_template_image.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x_guess.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y_guess.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_displacement); __Pyx_XDECREF((PyObject *)__pyx_v_x); __Pyx_XDECREF((PyObject *)__pyx_v_y); __Pyx_XDECREF((PyObject *)__pyx_v_x_guess); __Pyx_XDECREF((PyObject *)__pyx_v_y_guess); __Pyx_XDECREF((PyObject *)__pyx_v_interp_coef); __Pyx_XDECREF((PyObject *)__pyx_v_l_i); __Pyx_XDECREF((PyObject *)__pyx_v_m_j); __Pyx_XDECREF((PyObject *)__pyx_v_i_min); __Pyx_XDECREF((PyObject *)__pyx_v_i_max); __Pyx_XDECREF((PyObject *)__pyx_v_j_min); __Pyx_XDECREF((PyObject *)__pyx_v_j_max); __Pyx_XDECREF(__pyx_v_counts); __Pyx_XDECREF((PyObject *)__pyx_v_morphed_image); __Pyx_XDECREF((PyObject *)__pyx_v_morph_mask); __Pyx_XDECREF((PyObject *)__pyx_v__gradient_data); __Pyx_XDECREF((PyObject *)__pyx_v_grad_residuals); __Pyx_XDECREF((PyObject 
*)__pyx_v_grad_smooth);
__Pyx_XDECREF((PyObject *)__pyx_v_buffer);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* NOTE(review): this file is machine-generated by Cython (the quoted-source
 * comments below echo the .pyx/.pxd lines each statement came from).  Do not
 * hand-edit; change pysteps/motion/_vet.pyx (or Cython's numpy/__init__.pxd)
 * and regenerate instead.  The statements above are the tail of the exit path
 * of the preceding generated function (presumably _cost_function, per the
 * __Pyx_AddTraceback label earlier in the file -- start of that function is
 * outside this chunk). */
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":258 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fulfill the PEP. */
/* NOTE(review): buffer-protocol (__getbuffer__) entry point for numpy.ndarray,
 * compiled from Cython's numpy/__init__.pxd.  The wrapper below only adapts the
 * raw PyObject* arguments and delegates to the _pf_ implementation. */
/* Python wrapper */
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* NOTE(review): implementation.  Validates the caller's contiguity request
 * flags, fills the Py_buffer fields (buf/ndim/strides/shape/suboffsets/
 * itemsize/readonly/obj) from the PyArrayObject, and produces a struct-module
 * format string for the dtype: a switch over type_num for simple dtypes, or
 * _util_dtypestring for structured dtypes.  Returns 0 on success, -1 with an
 * exception set on failure. */
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_i;
int __pyx_v_ndim;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
int __pyx_v_t;
char *__pyx_v_f;
PyArray_Descr *__pyx_v_descr = 0;
int __pyx_v_offset;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
PyArray_Descr *__pyx_t_7;
PyObject *__pyx_t_8 = NULL;
char *__pyx_t_9;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj);
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 * * cdef int i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * */
__pyx_v_endian_detector = 1;
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":266 * cdef int i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */
/* NOTE(review): runtime endianness probe -- reads the low byte of an int set to 1. */
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":268 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */
__pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 * ndim = PyArray_NDIM(self) * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */
__pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":271 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_ARRAY_C_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 * ndim = PyArray_NDIM(self) * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */
if (unlikely(__pyx_t_1)) {
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 272, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 272, __pyx_L1_error)
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 * ndim = PyArray_NDIM(self) * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */
}
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */
__pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L7_bool_binop_done;
}
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":275 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_ARRAY_F_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L7_bool_binop_done:;
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */
if (unlikely(__pyx_t_1)) {
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 276, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 276, __pyx_L1_error)
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */
}
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":278 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if sizeof(npy_intp) != sizeof(Py_ssize_t): */
__pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":279 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * # Allocate new buffer for strides and shape info. */
__pyx_v_info->ndim = __pyx_v_ndim;
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */
/* NOTE(review): when npy_intp and Py_ssize_t differ in size, strides/shape
 * must be copied element-by-element into a freshly malloc'd block (released
 * in __releasebuffer__); otherwise the array's own arrays are aliased. */
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":283 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */
__pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim))));
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":284 * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */
__pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":285 * info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */
__pyx_t_4 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":286 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */
(__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":287 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */
(__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
}
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */
goto __pyx_L9;
}
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":289 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */
/*else*/ {
__pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":290 * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */
__pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
}
__pyx_L9:;
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":291 * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */
__pyx_v_info->suboffsets = NULL;
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":292 * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */
__pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":293 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */
__pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":296 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = <dtype>PyArray_DESCR(self) * cdef int offset */
__pyx_v_f = NULL;
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":297 * cdef int t * cdef char* f = NULL * cdef dtype descr = <dtype>PyArray_DESCR(self) # <<<<<<<<<<<<<< * cdef int offset * */
__pyx_t_7 = PyArray_DESCR(__pyx_v_self);
__pyx_t_3 = ((PyObject *)__pyx_t_7);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
__pyx_t_3 = 0;
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":300 * cdef int offset * * info.obj = self # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(descr): */
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":302 * info.obj = self * * if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */
__pyx_t_1 = ((!(PyDataType_HASFIELDS(__pyx_v_descr) != 0)) != 0);
if (__pyx_t_1) {
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":303 * * if not PyDataType_HASFIELDS(descr): * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */
__pyx_t_4 = __pyx_v_descr->type_num;
__pyx_v_t = __pyx_t_4;
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":304 * if not PyDataType_HASFIELDS(descr): * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */
/* NOTE(review): rejects dtypes whose explicit byte order contradicts the
 * host's endianness probed above. */
__pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0);
if (!__pyx_t_2) {
goto __pyx_L15_next_or;
} else {
}
__pyx_t_2 = (__pyx_v_little_endian != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L14_bool_binop_done;
}
__pyx_L15_next_or:;
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":305 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */
__pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L14_bool_binop_done:;
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":304 * if not PyDataType_HASFIELDS(descr): * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */
if (unlikely(__pyx_t_1)) {
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":306 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 306, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 306, __pyx_L1_error)
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":304 * if not PyDataType_HASFIELDS(descr): * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */
}
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */
/* NOTE(review): maps a simple (non-structured) numpy type number to its
 * struct-module format character. */
switch (__pyx_v_t) {
case NPY_BYTE:
__pyx_v_f = ((char *)"b");
break;
case NPY_UBYTE:
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */
__pyx_v_f = ((char *)"B");
break;
case NPY_SHORT:
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":309 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */
__pyx_v_f = ((char *)"h");
break;
case NPY_USHORT:
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */
__pyx_v_f = ((char *)"H");
break;
case NPY_INT:
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":311 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */
__pyx_v_f = ((char *)"i");
break;
case NPY_UINT:
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":312 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */
__pyx_v_f = ((char *)"I");
break;
case NPY_LONG:
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":313 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */
__pyx_v_f = ((char *)"l");
break;
case NPY_ULONG:
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":314 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */
__pyx_v_f = ((char *)"L");
break;
case NPY_LONGLONG:
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":315 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */
__pyx_v_f = ((char *)"q");
break;
case NPY_ULONGLONG:
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":316 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */
__pyx_v_f = ((char *)"Q");
break;
case NPY_FLOAT:
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":317 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */
__pyx_v_f = ((char *)"f");
break;
case NPY_DOUBLE:
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":318 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */
__pyx_v_f = ((char *)"d");
break;
case NPY_LONGDOUBLE:
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":319 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */
__pyx_v_f = ((char *)"g");
break;
case NPY_CFLOAT:
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":320 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */
__pyx_v_f = ((char *)"Zf");
break;
case NPY_CDOUBLE:
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":321 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */
__pyx_v_f = ((char *)"Zd");
break;
case NPY_CLONGDOUBLE:
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":322 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */
__pyx_v_f = ((char *)"Zg");
break;
case NPY_OBJECT:
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":323 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */
__pyx_v_f = ((char *)"O");
break;
default:
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":325 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 325, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 325, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 325, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 325, __pyx_L1_error)
break;
}
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":326 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */
__pyx_v_info->format = __pyx_v_f;
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":327 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = <char*>PyObject_Malloc(_buffer_format_string_len) */
__pyx_r = 0;
goto __pyx_L0;
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":302 * info.obj = self * * if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */
}
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":329 * return * else: * info.format = <char*>PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */
/* NOTE(review): structured-dtype branch -- 0xFF is _buffer_format_string_len
 * (see the echoed pxd source); the buffer is freed in __releasebuffer__. */
/*else*/ {
__pyx_v_info->format = ((char *)PyObject_Malloc(0xFF));
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":330 * else: * info.format = <char*>PyObject_Malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */
(__pyx_v_info->format[0]) = '^';
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":331 * info.format = <char*>PyObject_Malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */
__pyx_v_offset = 0;
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":332 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */
__pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 332, __pyx_L1_error)
__pyx_v_f = __pyx_t_9;
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":335 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */
(__pyx_v_f[0]) = '\x00';
}
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":258 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fulfill the PEP. */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_descr);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":337 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) */
/* NOTE(review): __releasebuffer__ counterpart -- frees the format string and
 * strides/shape block that __getbuffer__ malloc'd above (implementation
 * continues past this chunk). */
/* Python wrapper */
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
__pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__releasebuffer__", 0);
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":338 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */
__pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
if (__pyx_t_1) {
/*
"../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":339 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * PyObject_Free(info.strides) */ PyObject_Free(__pyx_v_info->format); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":338 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":340 * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * PyObject_Free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":341 * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * PyObject_Free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ PyObject_Free(__pyx_v_info->strides); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":340 * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * PyObject_Free(info.strides) * # info.shape was stored after info.strides in the same block */ } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":337 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) */ /* function exit code */ 
__Pyx_RefNannyFinishContext(); } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":821 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 822, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":821 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":824 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; 
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 825, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":824 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":828 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); 
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":830 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":831 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; 
/* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":830 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":833 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":834 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<< * * cdef inline tuple PyDataType_SHAPE(dtype d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":833 * return PyArray_MultiIterNew(4, <void*>a, 
<void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":836 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< * if PyDataType_HASSUBARRAY(d): * return <tuple>d.subarray.shape */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837 * * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< * return <tuple>d.subarray.shape * else: */ __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); if (__pyx_t_1) { /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":838 * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): * return <tuple>d.subarray.shape # <<<<<<<<<<<<<< * else: * return () */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); goto __pyx_L0; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837 * * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< * return <tuple>d.subarray.shape * else: */ } /* 
"../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":840 * return <tuple>d.subarray.shape * else: * return () # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_empty_tuple); __pyx_r = __pyx_empty_tuple; goto __pyx_L0; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":836 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< * if PyDataType_HASSUBARRAY(d): * return <tuple>d.subarray.shape */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":842 * return () * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; long __pyx_t_8; char *__pyx_t_9; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 * * cdef dtype child * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":848 * cdef dtype child * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); __PYX_ERR(1, 851, __pyx_L1_error) } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 851, 
__pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":852 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ if (unlikely(__pyx_v_descr->fields == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 852, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":853 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - <int>(new_offset - offset[0]) < 15: */ if (likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 853, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = 
PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 853, __pyx_L1_error) } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (unlikely(__pyx_t_6)) { /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":856 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 856, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 856, __pyx_L1_error) /* 
"../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); if (!__pyx_t_7) { goto __pyx_L8_next_or; } else { } __pyx_t_7 = (__pyx_v_little_endian != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_L8_next_or:; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":859 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L7_bool_binop_done:; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (unlikely(__pyx_t_6)) { /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":860 
* if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 860, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 860, __pyx_L1_error) /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":870 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 870, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 870, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 870, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":871 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 0x78; /* 
"../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":872 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":873 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":875 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":877 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":878 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 878, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":879 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (unlikely(__pyx_t_6)) { /* 
"../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":880 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 880, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 880, __pyx_L1_error) /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":879 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":883 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 883, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 883, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 883, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L15; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":884 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = 
__Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 884, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 884, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 884, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L15; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":885 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 885, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 885, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 885, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x68; goto __pyx_L15; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":886 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 886, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 886, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); 
__pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 886, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L15; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":887 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 887, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 887, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 887, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x69; goto __pyx_L15; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":888 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 888, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 888, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 888, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L15; } /* 
"../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":889 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 889, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 889, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 889, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x6C; goto __pyx_L15; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":890 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 890, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 890, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 890, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L15; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":891 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == 
NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 891, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 891, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 891, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x71; goto __pyx_L15; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":892 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 892, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 892, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 892, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L15; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":893 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 893, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 893, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 893, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x66; goto __pyx_L15; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":894 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 894, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 894, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 894, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x64; goto __pyx_L15; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":895 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 895, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 895, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 895, __pyx_L1_error) 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x67; goto __pyx_L15; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":896 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 896, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 896, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 896, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x66; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":897 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 897, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 897, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 897, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 
0x64; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":898 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 898, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 898, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 898, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x67; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":899 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 899, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 899, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 899, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (likely(__pyx_t_6)) { (__pyx_v_f[0]) = 79; goto __pyx_L15; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":901 
* elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ /*else*/ { __pyx_t_3 = __Pyx_PyUnicode_FormatSafe(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 901, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 901, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 901, __pyx_L1_error) } __pyx_L15:; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":902 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":877 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ goto __pyx_L13; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":906 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ /*else*/ { __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 906, __pyx_L1_error) __pyx_v_f = __pyx_t_9; } __pyx_L13:; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* 
"../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":907 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":842 * return () * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * int _import_umath() except -1 * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * Py_INCREF(base) # important to do this before stealing the reference below! * PyArray_SetBaseObject(arr, base) */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("set_array_base", 0); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1023 * * cdef inline void set_array_base(ndarray arr, object base): * Py_INCREF(base) # important to do this before stealing the reference below! 
# <<<<<<<<<<<<<< * PyArray_SetBaseObject(arr, base) * */ Py_INCREF(__pyx_v_base); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1024 * cdef inline void set_array_base(ndarray arr, object base): * Py_INCREF(base) # important to do this before stealing the reference below! * PyArray_SetBaseObject(arr, base) # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ (void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base)); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * int _import_umath() except -1 * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * Py_INCREF(base) # important to do this before stealing the reference below! * PyArray_SetBaseObject(arr, base) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1026 * PyArray_SetBaseObject(arr, base) * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * base = PyArray_BASE(arr) * if base is NULL: */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_v_base; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1027 * * cdef inline object get_array_base(ndarray arr): * base = PyArray_BASE(arr) # <<<<<<<<<<<<<< * if base is NULL: * return None */ __pyx_v_base = PyArray_BASE(__pyx_v_arr); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1028 * cdef inline object get_array_base(ndarray arr): * base = PyArray_BASE(arr) * if base is NULL: # <<<<<<<<<<<<<< * return None * return <object>base */ __pyx_t_1 = ((__pyx_v_base == NULL) != 0); if (__pyx_t_1) { /* 
"../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1029 * base = PyArray_BASE(arr) * if base is NULL: * return None # <<<<<<<<<<<<<< * return <object>base * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1028 * cdef inline object get_array_base(ndarray arr): * base = PyArray_BASE(arr) * if base is NULL: # <<<<<<<<<<<<<< * return None * return <object>base */ } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1030 * if base is NULL: * return None * return <object>base # <<<<<<<<<<<<<< * * # Versions of the import_* functions which are more suitable for */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_base)); __pyx_r = ((PyObject *)__pyx_v_base); goto __pyx_L0; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1026 * PyArray_SetBaseObject(arr, base) * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * base = PyArray_BASE(arr) * if base is NULL: */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1034 * # Versions of the import_* functions which are more suitable for * # Cython code. 
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<< * try: * _import_array() */ static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_array", 0); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1035 * # Cython code. * cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1036 * cdef inline int import_array() except -1: * try: * _import_array() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.multiarray failed to import") */ __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1036, __pyx_L3_error) /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1035 * # Cython code. 
* cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1037 * try: * _import_array() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.multiarray failed to import") * */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1037, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1038 * _import_array() * except Exception: * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_umath() except -1: */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1038, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1038, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1035 * # Cython code. 
* cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1034 * # Versions of the import_* functions which are more suitable for * # Cython code. * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< * try: * _import_array() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1040 * raise ImportError("numpy.core.multiarray failed to import") * * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_umath", 0); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1041 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* 
"../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1042 * cdef inline int import_umath() except -1: * try: * _import_umath() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1042, __pyx_L3_error) /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1041 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1043 * try: * _import_umath() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.umath failed to import") * */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1043, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1044 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1044, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1044, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* 
"../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1041 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1040 * raise ImportError("numpy.core.multiarray failed to import") * * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1046 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_ufunc", 0); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1047 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); 
/*try:*/ { /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1048 * cdef inline int import_ufunc() except -1: * try: * _import_umath() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1048, __pyx_L3_error) /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1047 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1049 * try: * _import_umath() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1049, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1050 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1050, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1050, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* 
"../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1047 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1046 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec__vet(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec__vet}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "_vet", __pyx_k_Cython_module_for_morphing_and, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE 
__attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_u_Error_computing_cost_function, __pyx_k_Error_computing_cost_function, sizeof(__pyx_k_Error_computing_cost_function), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_kp_u_The_number_of_sectors_in_x_axis, __pyx_k_The_number_of_sectors_in_x_axis, sizeof(__pyx_k_The_number_of_sectors_in_x_axis), 0, 1, 0, 0}, {&__pyx_kp_u_The_number_of_sectors_in_y_axis, __pyx_k_The_number_of_sectors_in_y_axis, sizeof(__pyx_k_The_number_of_sectors_in_y_axis), 0, 1, 0, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_arange, __pyx_k_arange, sizeof(__pyx_k_arange), 0, 0, 1, 1}, {&__pyx_n_s_axis, __pyx_k_axis, sizeof(__pyx_k_axis), 0, 0, 1, 1}, {&__pyx_n_s_buffer, __pyx_k_buffer, sizeof(__pyx_k_buffer), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_cost_function, __pyx_k_cost_function, sizeof(__pyx_k_cost_function), 0, 0, 1, 1}, {&__pyx_n_s_counts, __pyx_k_counts, sizeof(__pyx_k_counts), 0, 0, 1, 1}, {&__pyx_n_s_df_dx2, __pyx_k_df_dx2, sizeof(__pyx_k_df_dx2), 0, 0, 1, 1}, {&__pyx_n_s_df_dxdy, __pyx_k_df_dxdy, sizeof(__pyx_k_df_dxdy), 0, 0, 1, 1}, {&__pyx_n_s_df_dy2, __pyx_k_df_dy2, sizeof(__pyx_k_df_dy2), 0, 0, 1, 1}, 
{&__pyx_n_s_displacement, __pyx_k_displacement, sizeof(__pyx_k_displacement), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_dx, __pyx_k_dx, sizeof(__pyx_k_dx), 0, 0, 1, 1}, {&__pyx_n_s_dy, __pyx_k_dy, sizeof(__pyx_k_dy), 0, 0, 1, 1}, {&__pyx_n_s_f00, __pyx_k_f00, sizeof(__pyx_k_f00), 0, 0, 1, 1}, {&__pyx_n_s_f01, __pyx_k_f01, sizeof(__pyx_k_f01), 0, 0, 1, 1}, {&__pyx_n_s_f10, __pyx_k_f10, sizeof(__pyx_k_f10), 0, 0, 1, 1}, {&__pyx_n_s_f11, __pyx_k_f11, sizeof(__pyx_k_f11), 0, 0, 1, 1}, {&__pyx_n_s_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 0, 1, 1}, {&__pyx_n_u_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 1, 0, 1}, {&__pyx_n_s_full, __pyx_k_full, sizeof(__pyx_k_full), 0, 0, 1, 1}, {&__pyx_n_s_grad_residuals, __pyx_k_grad_residuals, sizeof(__pyx_k_grad_residuals), 0, 0, 1, 1}, {&__pyx_n_s_grad_smooth, __pyx_k_grad_smooth, sizeof(__pyx_k_grad_smooth), 0, 0, 1, 1}, {&__pyx_n_s_gradient, __pyx_k_gradient, sizeof(__pyx_k_gradient), 0, 0, 1, 1}, {&__pyx_n_s_gradient_data, __pyx_k_gradient_data, sizeof(__pyx_k_gradient_data), 0, 0, 1, 1}, {&__pyx_n_s_gradient_values, __pyx_k_gradient_values, sizeof(__pyx_k_gradient_values), 0, 0, 1, 1}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_i_max, __pyx_k_i_max, sizeof(__pyx_k_i_max), 0, 0, 1, 1}, {&__pyx_n_s_i_min, __pyx_k_i_min, sizeof(__pyx_k_i_min), 0, 0, 1, 1}, {&__pyx_n_s_i_sec, __pyx_k_i_sec, sizeof(__pyx_k_i_sec), 0, 0, 1, 1}, {&__pyx_n_s_i_shift, __pyx_k_i_shift, sizeof(__pyx_k_i_shift), 0, 0, 1, 1}, {&__pyx_n_s_image, __pyx_k_image, sizeof(__pyx_k_image), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_inloop_smoothness_penalty, __pyx_k_inloop_smoothness_penalty, sizeof(__pyx_k_inloop_smoothness_penalty), 0, 0, 1, 1}, {&__pyx_n_s_input_image, __pyx_k_input_image, sizeof(__pyx_k_input_image), 0, 0, 1, 1}, {&__pyx_n_s_int8, __pyx_k_int8, sizeof(__pyx_k_int8), 0, 0, 1, 1}, 
{&__pyx_n_s_interp_coef, __pyx_k_interp_coef, sizeof(__pyx_k_interp_coef), 0, 0, 1, 1}, {&__pyx_n_s_intp, __pyx_k_intp, sizeof(__pyx_k_intp), 0, 0, 1, 1}, {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, {&__pyx_n_s_j_max, __pyx_k_j_max, sizeof(__pyx_k_j_max), 0, 0, 1, 1}, {&__pyx_n_s_j_min, __pyx_k_j_min, sizeof(__pyx_k_j_min), 0, 0, 1, 1}, {&__pyx_n_s_j_sec, __pyx_k_j_sec, sizeof(__pyx_k_j_sec), 0, 0, 1, 1}, {&__pyx_n_s_j_shift, __pyx_k_j_shift, sizeof(__pyx_k_j_shift), 0, 0, 1, 1}, {&__pyx_n_s_l, __pyx_k_l, sizeof(__pyx_k_l), 0, 0, 1, 1}, {&__pyx_n_s_l0, __pyx_k_l0, sizeof(__pyx_k_l0), 0, 0, 1, 1}, {&__pyx_n_s_l1, __pyx_k_l1, sizeof(__pyx_k_l1), 0, 0, 1, 1}, {&__pyx_n_s_l_i, __pyx_k_l_i, sizeof(__pyx_k_l_i), 0, 0, 1, 1}, {&__pyx_n_s_ll, __pyx_k_ll, sizeof(__pyx_k_ll), 0, 0, 1, 1}, {&__pyx_n_s_m, __pyx_k_m, sizeof(__pyx_k_m), 0, 0, 1, 1}, {&__pyx_n_s_m0, __pyx_k_m0, sizeof(__pyx_k_m0), 0, 0, 1, 1}, {&__pyx_n_s_m1, __pyx_k_m1, sizeof(__pyx_k_m1), 0, 0, 1, 1}, {&__pyx_n_s_m_j, __pyx_k_m_j, sizeof(__pyx_k_m_j), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_mask, __pyx_k_mask, sizeof(__pyx_k_mask), 0, 0, 1, 1}, {&__pyx_n_s_mean, __pyx_k_mean, sizeof(__pyx_k_mean), 0, 0, 1, 1}, {&__pyx_n_s_mm, __pyx_k_mm, sizeof(__pyx_k_mm), 0, 0, 1, 1}, {&__pyx_n_s_morph_mask, __pyx_k_morph_mask, sizeof(__pyx_k_morph_mask), 0, 0, 1, 1}, {&__pyx_n_s_morphed_image, __pyx_k_morphed_image, sizeof(__pyx_k_morphed_image), 0, 0, 1, 1}, {&__pyx_n_s_morphed_mask, __pyx_k_morphed_mask, sizeof(__pyx_k_morphed_mask), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_new_image, __pyx_k_new_image, 
sizeof(__pyx_k_new_image), 0, 0, 1, 1}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_kp_u_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 1, 0, 0}, {&__pyx_kp_u_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 1, 0, 0}, {&__pyx_n_s_nx, __pyx_k_nx, sizeof(__pyx_k_nx), 0, 0, 1, 1}, {&__pyx_n_s_ny, __pyx_k_ny, sizeof(__pyx_k_ny), 0, 0, 1, 1}, {&__pyx_n_s_pysteps_motion__vet, __pyx_k_pysteps_motion__vet, sizeof(__pyx_k_pysteps_motion__vet), 0, 0, 1, 1}, {&__pyx_kp_s_pysteps_motion__vet_pyx, __pyx_k_pysteps_motion__vet_pyx, sizeof(__pyx_k_pysteps_motion__vet_pyx), 0, 0, 1, 0}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reshape, __pyx_k_reshape, sizeof(__pyx_k_reshape), 0, 0, 1, 1}, {&__pyx_n_s_residuals, __pyx_k_residuals, sizeof(__pyx_k_residuals), 0, 0, 1, 1}, {&__pyx_n_s_return_counts, __pyx_k_return_counts, sizeof(__pyx_k_return_counts), 0, 0, 1, 1}, {&__pyx_n_s_return_index, __pyx_k_return_index, sizeof(__pyx_k_return_index), 0, 0, 1, 1}, {&__pyx_n_s_sector_area, __pyx_k_sector_area, sizeof(__pyx_k_sector_area), 0, 0, 1, 1}, {&__pyx_n_s_sector_displacement, __pyx_k_sector_displacement, sizeof(__pyx_k_sector_displacement), 0, 0, 1, 1}, {&__pyx_n_s_smooth_gain, __pyx_k_smooth_gain, sizeof(__pyx_k_smooth_gain), 0, 0, 1, 1}, {&__pyx_n_s_smoothness_penalty, __pyx_k_smoothness_penalty, sizeof(__pyx_k_smoothness_penalty), 0, 0, 1, 1}, {&__pyx_n_s_sum, __pyx_k_sum, sizeof(__pyx_k_sum), 0, 0, 1, 1}, {&__pyx_n_s_template_image, __pyx_k_template_image, sizeof(__pyx_k_template_image), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_unique, __pyx_k_unique, sizeof(__pyx_k_unique), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, 
__pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_warp, __pyx_k_warp, sizeof(__pyx_k_warp), 0, 0, 1, 1}, {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, {&__pyx_n_s_x_ceil, __pyx_k_x_ceil, sizeof(__pyx_k_x_ceil), 0, 0, 1, 1}, {&__pyx_n_s_x_float, __pyx_k_x_float, sizeof(__pyx_k_x_float), 0, 0, 1, 1}, {&__pyx_n_s_x_floor, __pyx_k_x_floor, sizeof(__pyx_k_x_floor), 0, 0, 1, 1}, {&__pyx_n_s_x_guess, __pyx_k_x_guess, sizeof(__pyx_k_x_guess), 0, 0, 1, 1}, {&__pyx_n_s_x_image_size, __pyx_k_x_image_size, sizeof(__pyx_k_x_image_size), 0, 0, 1, 1}, {&__pyx_n_s_x_max_float, __pyx_k_x_max_float, sizeof(__pyx_k_x_max_float), 0, 0, 1, 1}, {&__pyx_n_s_x_max_int, __pyx_k_x_max_int, sizeof(__pyx_k_x_max_int), 0, 0, 1, 1}, {&__pyx_n_s_x_sector_size, __pyx_k_x_sector_size, sizeof(__pyx_k_x_sector_size), 0, 0, 1, 1}, {&__pyx_n_s_x_sectors, __pyx_k_x_sectors, sizeof(__pyx_k_x_sectors), 0, 0, 1, 1}, {&__pyx_n_s_xy, __pyx_k_xy, sizeof(__pyx_k_xy), 0, 0, 1, 1}, {&__pyx_n_s_y, __pyx_k_y, sizeof(__pyx_k_y), 0, 0, 1, 1}, {&__pyx_n_s_y_ceil, __pyx_k_y_ceil, sizeof(__pyx_k_y_ceil), 0, 0, 1, 1}, {&__pyx_n_s_y_float, __pyx_k_y_float, sizeof(__pyx_k_y_float), 0, 0, 1, 1}, {&__pyx_n_s_y_floor, __pyx_k_y_floor, sizeof(__pyx_k_y_floor), 0, 0, 1, 1}, {&__pyx_n_s_y_guess, __pyx_k_y_guess, sizeof(__pyx_k_y_guess), 0, 0, 1, 1}, {&__pyx_n_s_y_image_size, __pyx_k_y_image_size, sizeof(__pyx_k_y_image_size), 0, 0, 1, 1}, {&__pyx_n_s_y_max_float, __pyx_k_y_max_float, sizeof(__pyx_k_y_max_float), 0, 0, 1, 1}, {&__pyx_n_s_y_max_int, __pyx_k_y_max_int, sizeof(__pyx_k_y_max_int), 0, 0, 1, 1}, {&__pyx_n_s_y_sector_size, __pyx_k_y_sector_size, sizeof(__pyx_k_y_sector_size), 0, 0, 1, 1}, {&__pyx_n_s_y_sectors, __pyx_k_y_sectors, sizeof(__pyx_k_y_sectors), 0, 0, 1, 1}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {&__pyx_n_s_zip, __pyx_k_zip, sizeof(__pyx_k_zip), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static 
CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 163, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 357, __pyx_L1_error) __pyx_builtin_zip = __Pyx_GetBuiltinName(__pyx_n_s_zip); if (!__pyx_builtin_zip) __PYX_ERR(0, 464, __pyx_L1_error) __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 856, __pyx_L1_error) __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 1038, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "pysteps/motion/_vet.pyx":357 * * if x_image_size % x_sectors != 0: * raise ValueError("Error computing cost function.\n", # <<<<<<<<<<<<<< * "The number of sectors in x axis (axis=0)" * + " don't divide the image size") */ __pyx_tuple_ = PyTuple_Pack(2, __pyx_kp_u_Error_computing_cost_function, __pyx_kp_u_The_number_of_sectors_in_x_axis); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 357, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "pysteps/motion/_vet.pyx":362 * * if y_image_size % y_sectors != 0: * raise ValueError("Error computing cost function.\n", # <<<<<<<<<<<<<< * "The number of sectors in y axis (axis=1) don't" * + " divide the image size") */ __pyx_tuple__2 = PyTuple_Pack(2, __pyx_kp_u_Error_computing_cost_function, __pyx_kp_u_The_number_of_sectors_in_y_axis); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 362, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "pysteps/motion/_vet.pyx":504 * buffer[morph_mask == 1] = 0 * * _gradient_data[0, :] *= buffer # <<<<<<<<<<<<<< * _gradient_data[1, :] *= buffer * */ 
__pyx_slice__3 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__3)) __PYX_ERR(0, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__3); __Pyx_GIVEREF(__pyx_slice__3); __pyx_tuple__4 = PyTuple_Pack(2, __pyx_int_0, __pyx_slice__3); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "pysteps/motion/_vet.pyx":505 * * _gradient_data[0, :] *= buffer * _gradient_data[1, :] *= buffer # <<<<<<<<<<<<<< * * for l in range(x_sectors): # schedule='dynamic', nogil=True): */ __pyx_tuple__5 = PyTuple_Pack(2, __pyx_int_1, __pyx_slice__3); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 505, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 272, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":306 * if ((descr.byteorder == c'>' and 
little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":856 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 856, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":880 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 880, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1038 * _import_array() * except Exception: * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_umath() except -1: */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 1038, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* 
"../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1044 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); /* "pysteps/motion/_vet.pyx":67 * @cython.nonecheck(False) * @cython.cdivision(True) * def _warp(np.ndarray[float64, ndim=2] image, # <<<<<<<<<<<<<< * np.ndarray[int8, ndim=2] mask, * np.ndarray[float64, ndim=3] displacement, */ __pyx_tuple__13 = PyTuple_Pack(27, __pyx_n_s_image, __pyx_n_s_mask, __pyx_n_s_displacement, __pyx_n_s_gradient, __pyx_n_s_nx, __pyx_n_s_ny, __pyx_n_s_new_image, __pyx_n_s_morphed_mask, __pyx_n_s_gradient_values, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_x_max_int, __pyx_n_s_y_max_int, __pyx_n_s_x_max_float, __pyx_n_s_y_max_float, __pyx_n_s_x_float, __pyx_n_s_y_float, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_x_floor, __pyx_n_s_x_ceil, __pyx_n_s_y_floor, __pyx_n_s_y_ceil, __pyx_n_s_f00, __pyx_n_s_f10, __pyx_n_s_f01, __pyx_n_s_f11); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); __pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(4, 0, 27, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pysteps_motion__vet_pyx, __pyx_n_s_warp, 67, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) __PYX_ERR(0, 67, __pyx_L1_error) /* "pysteps/motion/_vet.pyx":240 * @cython.nonecheck(False) * @cython.cdivision(True) * def _cost_function(np.ndarray[float64, ndim=3] sector_displacement, # <<<<<<<<<<<<<< * np.ndarray[float64, ndim=2] template_image, * np.ndarray[float64, ndim=2] input_image, */ __pyx_tuple__15 = PyTuple_Pack(54, 
__pyx_n_s_sector_displacement, __pyx_n_s_template_image, __pyx_n_s_input_image, __pyx_n_s_mask, __pyx_n_s_smooth_gain, __pyx_n_s_gradient, __pyx_n_s_x_sectors, __pyx_n_s_y_sectors, __pyx_n_s_x_image_size, __pyx_n_s_y_image_size, __pyx_n_s_x_sector_size, __pyx_n_s_y_sector_size, __pyx_n_s_displacement, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_xy, __pyx_n_s_l, __pyx_n_s_m, __pyx_n_s_ll, __pyx_n_s_mm, __pyx_n_s_i_sec, __pyx_n_s_j_sec, __pyx_n_s_l0, __pyx_n_s_m0, __pyx_n_s_l1, __pyx_n_s_m1, __pyx_n_s_i_shift, __pyx_n_s_j_shift, __pyx_n_s_axis, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_x_guess, __pyx_n_s_y_guess, __pyx_n_s_sector_area, __pyx_n_s_interp_coef, __pyx_n_s_l_i, __pyx_n_s_m_j, __pyx_n_s_i_min, __pyx_n_s_i_max, __pyx_n_s_j_min, __pyx_n_s_j_max, __pyx_n_s_counts, __pyx_n_s_morphed_image, __pyx_n_s_morph_mask, __pyx_n_s_gradient_data, __pyx_n_s_grad_residuals, __pyx_n_s_grad_smooth, __pyx_n_s_buffer, __pyx_n_s_residuals, __pyx_n_s_smoothness_penalty, __pyx_n_s_df_dx2, __pyx_n_s_df_dxdy, __pyx_n_s_df_dy2, __pyx_n_s_inloop_smoothness_penalty); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__15); __Pyx_GIVEREF(__pyx_tuple__15); __pyx_codeobj__16 = (PyObject*)__Pyx_PyCode_New(6, 0, 54, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pysteps_motion__vet_pyx, __pyx_n_s_cost_function, 240, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_float_1_0 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_float_1_0)) __PYX_ERR(0, 1, 
__pyx_L1_error) __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; 
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(2, 9, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 206, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore); if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(1, 206, __pyx_L1_error) __pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(1, 229, __pyx_L1_error) __pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(1, 233, __pyx_L1_error) __pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore); if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(1, 242, __pyx_L1_error) __pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(1, 918, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_variable_import_code(void) { 
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #if PY_MAJOR_VERSION < 3 #ifdef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC void #else #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #endif #else #ifdef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyObject * #else #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC init_vet(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC init_vet(void) #else __Pyx_PyMODINIT_FUNC PyInit__vet(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit__vet(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? 
-1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec__vet(PyObject *__pyx_pyinit_module) 
#endif #endif { PyObject *__pyx_t_1 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module '_vet' has already been imported. Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit__vet(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD 
/* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("_vet", __pyx_methods, __pyx_k_Cython_module_for_morphing_and, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_pysteps__motion___vet) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "pysteps.motion._vet")) { if (unlikely(PyDict_SetItemString(modules, "pysteps.motion._vet", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); (void)__Pyx_modinit_type_init_code(); if (unlikely(__Pyx_modinit_type_import_code() != 0)) goto __pyx_L1_error; (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "pysteps/motion/_vet.pyx":8 * """ * from cython.parallel import prange, parallel * import numpy as np # <<<<<<<<<<<<<< * * cimport cython */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 8, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "pysteps/motion/_vet.pyx":67 * @cython.nonecheck(False) * @cython.cdivision(True) * def _warp(np.ndarray[float64, ndim=2] 
image, # <<<<<<<<<<<<<< * np.ndarray[int8, ndim=2] mask, * np.ndarray[float64, ndim=3] displacement, */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7pysteps_6motion_4_vet_1_warp, NULL, __pyx_n_s_pysteps_motion__vet); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_warp, __pyx_t_1) < 0) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "pysteps/motion/_vet.pyx":240 * @cython.nonecheck(False) * @cython.cdivision(True) * def _cost_function(np.ndarray[float64, ndim=3] sector_displacement, # <<<<<<<<<<<<<< * np.ndarray[float64, ndim=2] template_image, * np.ndarray[float64, ndim=2] input_image, */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7pysteps_6motion_4_vet_3_cost_function, NULL, __pyx_n_s_pysteps_motion__vet); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_cost_function, __pyx_t_1) < 0) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "pysteps/motion/_vet.pyx":1 * # -*- coding: utf-8 -*- # <<<<<<<<<<<<<< * * """ */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "../../../.conda/envs/pysteps/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1046 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init pysteps.motion._vet", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init pysteps.motion._vet"); } __pyx_L0:; 
__Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 
0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; 
ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t <= '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? 
"'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * 
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } 
ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; 
break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* BufferGetAndValidate */ static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (unlikely(info->buf == NULL)) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static int __Pyx__GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { buf->buf = NULL; if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) { __Pyx_ZeroBuffer(buf); return -1; } if (unlikely(buf->ndim != nd)) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if (unlikely((size_t)buf->itemsize != dtype->size)) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_SafeReleaseBuffer(buf); return -1; } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? 
__PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject 
*kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or 
None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if 
(!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* BufferFallbackError */ static void __Pyx_RaiseBufferFallbackError(void) { PyErr_SetString(PyExc_ValueError, "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!"); } /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* 
_PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if 
(__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject 
*args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif

/* RaiseTooManyValuesToUnpack */
/* NOTE: Cython-generated utility code — do not hand-edit; regenerate from the .pyx source. */
/* Raises ValueError for iterable unpacking that yielded more items than the
   `expected` number of targets (mirrors CPython's UNPACK_SEQUENCE error). */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
    PyErr_Format(PyExc_ValueError,
                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}

/* RaiseNeedMoreValuesToUnpack */
/* Raises ValueError when the iterable was exhausted after `index` items but
   more unpacking targets remain; "%.1s" pluralizes "value"/"values". */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
    PyErr_Format(PyExc_ValueError,
                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
                 index, (index == 1) ? "" : "s");
}

/* IterFinish */
/* Called after an iterator signalled exhaustion: returns 0 if there is no
   pending exception or the pending exception is StopIteration (which is
   consumed/cleared), -1 if some other exception is set and must propagate. */
static CYTHON_INLINE int __Pyx_IterFinish(void) {
#if CYTHON_FAST_THREAD_STATE
    /* Fast path: poke at the thread state's current-exception slots directly
       instead of going through PyErr_Fetch/PyErr_Clear. */
    PyThreadState *tstate = __Pyx_PyThreadState_Current;
    PyObject* exc_type = tstate->curexc_type;
    if (unlikely(exc_type)) {
        if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) {
            PyObject *exc_value, *exc_tb;
            exc_value = tstate->curexc_value;
            exc_tb = tstate->curexc_traceback;
            /* Detach the exception triple from the thread state before
               releasing our references — equivalent to PyErr_Clear(). */
            tstate->curexc_type = 0;
            tstate->curexc_value = 0;
            tstate->curexc_traceback = 0;
            Py_DECREF(exc_type);
            Py_XDECREF(exc_value);
            Py_XDECREF(exc_tb);
            return 0;
        } else {
            return -1;
        }
    }
    return 0;
#else
    /* Portable path via the public error API. */
    if (unlikely(PyErr_Occurred())) {
        if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) {
            PyErr_Clear();
            return 0;
        } else {
            return -1;
        }
    }
    return 0;
#endif
}

/* UnpackItemEndCheck */
/* After unpacking `expected` items, verifies the iterator is exhausted:
   a non-NULL `retval` means there was an extra item (-> ValueError);
   otherwise defer to __Pyx_IterFinish() to distinguish clean exhaustion
   from a real error. */
static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) {
    if (unlikely(retval)) {
        Py_DECREF(retval);
        __Pyx_RaiseTooManyValuesError(expected);
        return -1;
    } else {
        return __Pyx_IterFinish();
    }
    return 0; /* unreachable; kept as emitted by the generator */
}

/* PyIntBinop */
/* Optimized `op1 - intval` for int/long/float operands; falls back to the
   generic number protocol for anything else (continues on following lines). */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
    (void)inplace;
    (void)zerodivision_check;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_CheckExact(op1))) {
        const long b = intval;
        long x;
        long a = PyInt_AS_LONG(op1);
        /* unsigned subtraction avoids signed-overflow UB; overflow is
           detected afterwards via the sign-bit checks on the next line */
        x = (long)((unsigned long)a
- b); if (likely((x^a) >= 0 || (x^~b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_subtract(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned 
long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_subtract(op1, op2); } } x = a - b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla - llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("subtract", return NULL) result = ((double)a) - (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceSubtract : PyNumber_Subtract)(op1, op2); } #endif /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* DictGetItem */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { PyObject *value; value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { if (unlikely(PyTuple_Check(key))) { PyObject* args = PyTuple_Pack(1, key); if (likely(args)) { PyErr_SetObject(PyExc_KeyError, 
args); Py_DECREF(args); } } else { PyErr_SetObject(PyExc_KeyError, key); } } return NULL; } Py_INCREF(value); return value; }
#endif

/* RaiseNoneIterError */
/* NOTE: Cython-generated utility code — do not hand-edit; regenerate from the .pyx source. */
/* Raises the standard TypeError produced when None is unpacked/iterated. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}

/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
/* Walks the thread state's exc_info stack (Python >= 3.7 layout) past
   entries whose exc_type is NULL/None and returns the first frame that
   actually holds an active exception (or the stack bottom). */
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
    _PyErr_StackItem *exc_info = tstate->exc_info;
    while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
           exc_info->previous_item != NULL)
    {
        exc_info = exc_info->previous_item;
    }
    return exc_info;
}
#endif

/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
/* Copies the currently *handled* exception triple (sys.exc_info(), not the
   pending PyErr_Occurred() one) out of the thread state, returning new
   references in *type/*value/*tb. Counterpart of __Pyx__ExceptionReset. */
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
    *type = exc_info->exc_type;
    *value = exc_info->exc_value;
    *tb = exc_info->exc_traceback;
#else
    /* Older thread-state layout: the triple lives directly on tstate. */
    *type = tstate->exc_type;
    *value = tstate->exc_value;
    *tb = tstate->exc_traceback;
#endif
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
}

/* Restores a previously saved handled-exception triple into the thread
   state, stealing the references passed in and releasing whatever the
   thread state held before (swap-then-decref ordering avoids re-entrancy
   issues from destructors). */
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = tstate->exc_info;
    tmp_type = exc_info->exc_type;
    tmp_value = exc_info->exc_value;
    tmp_tb = exc_info->exc_traceback;
    exc_info->exc_type = type;
    exc_info->exc_value = value;
    exc_info->exc_traceback = tb;
#else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = type;
    tstate->exc_value = value;
    tstate->exc_traceback = tb;
#endif
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
#endif

/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
/* Tuple variant of exception matching (body continues on the next line). */
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback 
= local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size) { PyObject *result = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif result = PyObject_GetAttrString(module, class_name); if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if ((size_t)basicsize < size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); goto bad; } if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s size changed, may indicate binary incompatibility. 
" "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); goto bad; } else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(result); return NULL; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) 
{ PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != 
code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) 
{ #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} else if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); view->obj = NULL; Py_DECREF(obj); } #endif /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value) { const Py_intptr_t neg_one = (Py_intptr_t) ((Py_intptr_t) 0 - (Py_intptr_t) 1), const_zero = (Py_intptr_t) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(Py_intptr_t) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(Py_intptr_t) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(Py_intptr_t) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(Py_intptr_t) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(Py_intptr_t) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned 
char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(Py_intptr_t), little, !is_unsigned); } } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* Declarations */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return 
 ::std::complex< float >(x, y); }
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; }
#endif
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; }
#endif
/* Arithmetic */
/* Struct-based float complex arithmetic, used only when the compiler has no native complex type. */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); }
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; }
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; }
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; }
#if 1
/* Division using Smith's scaled algorithm (branch on |b.real| vs |b.imag|) for better numerical
 * behavior than the naive denom = re^2 + im^2 formula kept in the #else branch below. */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { if (b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); } else if (fabsf(b.real) >= fabsf(b.imag)) { if (b.real == 0 && b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); } else { float r = b.imag / b.real; float s = (float)(1.0) / (b.real + b.imag * r); return __pyx_t_float_complex_from_parts( (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); } } else { float r = b.real / b.imag; float s = (float)(1.0) / (b.imag + b.real * r); return __pyx_t_float_complex_from_parts( (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); } }
#else
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { if (b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); } else { float denom = b.real * b.real + b.imag * b.imag; return __pyx_t_float_complex_from_parts( (a.real * b.real + a.imag * b.imag) / denom, (a.imag * b.real - a.real * b.imag) / denom); } }
#endif
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; }
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); }
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; }
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
 return sqrtf(z.real*z.real + z.imag*z.imag);
#else
 return hypotf(z.real, z.imag);
#endif
 }
/* a**b: small non-negative integer exponents are unrolled via repeated multiplication;
 * general case goes through polar form (r*e^{i*theta}). */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(a, a); case 3: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(z, a); case 4: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } else if (b.imag == 0) { z.real = powf(a.real, b.real); z.imag = 0; return z; } else if (a.real > 0) { r = a.real; theta = 0; } else { r = -a.real; theta = atan2f(0.0, -1.0); } } else { r = __Pyx_c_abs_float(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr *
 b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; }
#endif
#endif
/* Declarations */
/* double-complex constructor: C++ std::complex, C99 _Complex, or a plain struct fallback. */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); }
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; }
#endif
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; }
#endif
/* Arithmetic */
/* Struct-based double complex arithmetic — mirrors the float variants above. */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); }
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; }
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; }
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; }
#if 1
/* Division via Smith's scaled algorithm (see the float variant above). */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { if (b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); } else if (fabs(b.real) >= fabs(b.imag)) { if (b.real == 0 && b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); } else { double r = b.imag / b.real; double s = (double)(1.0) / (b.real + b.imag * r); return __pyx_t_double_complex_from_parts( (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); } } else { double r = b.real / b.imag; double s = (double)(1.0) / (b.imag + b.real * r); return __pyx_t_double_complex_from_parts( (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); } }
#else
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { if (b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); } else { double denom = b.real * b.real + b.imag * b.imag; return __pyx_t_double_complex_from_parts( (a.real * b.real + a.imag * b.imag) / denom, (a.imag * b.real - a.real * b.imag) / denom); } }
#endif
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; }
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); }
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; }
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
 return sqrt(z.real*z.real + z.imag*z.imag);
#else
 return hypot(z.real, z.imag);
#endif
 }
/* a**b for double complex — same structure as __Pyx_c_pow_float. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(a, a); case 3: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(z, a); case 4: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } else if (b.imag == 0) { z.real = pow(a.real, b.real); z.imag = 0; return z; } else if (a.real > 0) { r = a.real; theta = 0; } else { r = -a.real; theta = atan2(0.0, -1.0); } } else { r = __Pyx_c_abs_double(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; }
#endif
#endif
/* CIntToPy */
/* Convert a C int to a Python int (smallest suitable constructor, byte-array fallback). */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
 } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
 } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
 } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
 } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } }
/* CIntToPy */
/* Convert a numpy NPY_TYPES enum value to a Python int; signedness is probed at compile time. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { const enum NPY_TYPES neg_one = (enum NPY_TYPES) ((enum NPY_TYPES) 0 - (enum NPY_TYPES) 1), const_zero = (enum NPY_TYPES) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(enum NPY_TYPES) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
 } else if (sizeof(enum
 NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
 } } else { if (sizeof(enum NPY_TYPES) <= sizeof(long)) { return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
 } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
 } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), little, !is_unsigned); } }
/* CIntFromPy */
/* Convert a Python int (or int-like object) to Py_intptr_t with overflow checking.
 * Fast paths read PyLong digits directly (CYTHON_USE_PYLONG_INTERNALS); small positive/negative
 * digit counts are reassembled by shifting; otherwise the generic PyLong_As* APIs are used, and
 * as a last resort the value is copied out via _PyLong_AsByteArray. */
static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_As_Py_intptr_t(PyObject *x) { const Py_intptr_t neg_one = (Py_intptr_t) ((Py_intptr_t) 0 - (Py_intptr_t) 1), const_zero = (Py_intptr_t) 0; const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
 if (likely(PyInt_Check(x))) { if (sizeof(Py_intptr_t) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (Py_intptr_t) val; } } else
#endif
 if (likely(PyLong_Check(x))) { if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
 const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) {
 case 0: return (Py_intptr_t) 0;
 case 1: __PYX_VERIFY_RETURN_INT(Py_intptr_t, digit, digits[0])
 case 2: if (8 * sizeof(Py_intptr_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(Py_intptr_t) >= 2 * PyLong_SHIFT) { return (Py_intptr_t) (((((Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0])); } } break;
 case 3: if (8 * sizeof(Py_intptr_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(Py_intptr_t) >= 3 * PyLong_SHIFT) { return (Py_intptr_t) (((((((Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0])); } } break;
 case 4: if (8 * sizeof(Py_intptr_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(Py_intptr_t) >= 4 * PyLong_SHIFT) { return (Py_intptr_t) (((((((((Py_intptr_t)digits[3]) << PyLong_SHIFT) | (Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0])); } } break;
 }
#endif
#if CYTHON_COMPILING_IN_CPYTHON
 if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; }
#else
 { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (Py_intptr_t) -1; if (unlikely(result == 1)) goto raise_neg_overflow; }
#endif
 if (sizeof(Py_intptr_t) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(Py_intptr_t, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
 } else if (sizeof(Py_intptr_t) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(Py_intptr_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
 } } else {
#if CYTHON_USE_PYLONG_INTERNALS
 const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) {
 case 0: return (Py_intptr_t) 0;
 case -1: __PYX_VERIFY_RETURN_INT(Py_intptr_t, sdigit, (sdigit) (-(sdigit)digits[0]))
 case 1: __PYX_VERIFY_RETURN_INT(Py_intptr_t, digit, +digits[0])
 case -2: if (8 * sizeof(Py_intptr_t) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(Py_intptr_t) - 1 > 2 * PyLong_SHIFT) { return (Py_intptr_t) (((Py_intptr_t)-1)*(((((Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0]))); } } break;
 case 2: if (8 * sizeof(Py_intptr_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(Py_intptr_t) - 1 > 2 * PyLong_SHIFT) { return (Py_intptr_t) ((((((Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0]))); } } break;
 case -3: if (8 * sizeof(Py_intptr_t) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(Py_intptr_t) - 1 > 3 * PyLong_SHIFT) { return (Py_intptr_t) (((Py_intptr_t)-1)*(((((((Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0]))); } } break;
 case 3: if (8 * sizeof(Py_intptr_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(Py_intptr_t) - 1 > 3 * PyLong_SHIFT) { return (Py_intptr_t) ((((((((Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0]))); } } break;
 case -4: if (8 * sizeof(Py_intptr_t) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(Py_intptr_t) - 1 > 4 * PyLong_SHIFT) { return (Py_intptr_t) (((Py_intptr_t)-1)*(((((((((Py_intptr_t)digits[3]) << PyLong_SHIFT) | (Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0]))); } } break;
 case 4: if (8 * sizeof(Py_intptr_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(Py_intptr_t) - 1 > 4 * PyLong_SHIFT) { return (Py_intptr_t) ((((((((((Py_intptr_t)digits[3]) << PyLong_SHIFT) | (Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0]))); } } break;
 }
#endif
 if (sizeof(Py_intptr_t) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(Py_intptr_t, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
 } else if (sizeof(Py_intptr_t) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(Py_intptr_t, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
 } } {
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
 PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
 Py_intptr_t val; PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); }
#endif
 if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; }
#endif
 return (Py_intptr_t) -1; } } else { Py_intptr_t val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (Py_intptr_t) -1; val = __Pyx_PyInt_As_Py_intptr_t(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to Py_intptr_t"); return (Py_intptr_t) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to Py_intptr_t"); return (Py_intptr_t) -1; }
/* CIntFromPy */
/* Convert a Python int (or int-like object) to C int with overflow checking — same structure as
 * __Pyx_PyInt_As_Py_intptr_t above. */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else
#endif
 if (likely(PyLong_Check(x))) { if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
 const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) {
 case 0: return (int) 0;
 case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
 case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break;
 case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break;
 case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned
long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | 
(unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, 
PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned 
 long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break;
 case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break;
 case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break;
 }
#endif
#if CYTHON_COMPILING_IN_CPYTHON
 if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; }
#else
 /* Portable negativity check when PyLong internals are unavailable (e.g. PyPy). */
 { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; }
#endif
 if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
 } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
 } } else {
#if CYTHON_USE_PYLONG_INTERNALS
 /* Signed fast paths: negative digit counts encode the sign of the PyLong. */
 const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) {
 case 0: return (long) 0;
 case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
 case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
 case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break;
 case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break;
 case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break;
 case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break;
 case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break;
 case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break;
 }
#endif
 if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
 } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
 } } {
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
 PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
 long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); }
#endif
 if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; }
#endif
 return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; }
/* FastTypeChecks */
/* Fast exception/class matching without going through the full PyErr_GivenExceptionMatches path. */
#if CYTHON_COMPILING_IN_CPYTHON
/* Walk tp_base links; everything ultimately derives from object. */
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; }
/* Subtype test via the MRO tuple when available, falling back to the base-chain walk. */
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); }
#if PY_MAJOR_VERSION == 2
/* Py2: PyObject_IsSubclass may run arbitrary code, so save/restore any pending exception. */
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; }
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ?
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { 
char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if 
(unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). 
" "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
kmp_atomic_cas_cpt.c
// RUN: %libomp-compile-and-run
//
// Exercises the libomp compare-and-swap "capture" atomic entry points
// (__kmpc_atomic_{bool,val}_{1,2,4,8}_cas_cpt) that implement
//   #pragma omp atomic compare update capture
// for 1-, 2-, 4- and 8-byte signed integer types.
//
// bool_* flavor implements:
//   { r = x == e; if (r) { x = d; } else { v = x; } }
// i.e. *pv is written ONLY when the CAS fails (it captures the old value),
// and the return value reports whether the swap happened.
//
// val_* flavor implements:
//   { if (x == e) { x = d; }; v = x; }
// i.e. *pv always receives the final value of x, and the call returns the
// value of x observed before the operation.
//
// Returns the number of failed checks (0 on success) and prints "passed".
#include <stdio.h>
#include <stdbool.h>
#include <omp.h>

#ifdef __cplusplus
extern "C" {
#endif
// Entry points provided by the OpenMP runtime library (not declared in omp.h).
typedef void *ident_t;
extern bool __kmpc_atomic_bool_1_cas_cpt(ident_t *loc, int gtid, char *x,
                                         char e, char d, char *pv);
extern bool __kmpc_atomic_bool_2_cas_cpt(ident_t *loc, int gtid, short *x,
                                         short e, short d, short *pv);
extern bool __kmpc_atomic_bool_4_cas_cpt(ident_t *loc, int gtid, int *x, int e,
                                         int d, int *pv);
extern bool __kmpc_atomic_bool_8_cas_cpt(ident_t *loc, int gtid, long long *x,
                                         long long e, long long d,
                                         long long *pv);
extern char __kmpc_atomic_val_1_cas_cpt(ident_t *loc, int gtid, char *x, char e,
                                        char d, char *pv);
extern short __kmpc_atomic_val_2_cas_cpt(ident_t *loc, int gtid, short *x,
                                         short e, short d, short *pv);
extern int __kmpc_atomic_val_4_cas_cpt(ident_t *loc, int gtid, int *x, int e,
                                       int d, int *pv);
extern long long __kmpc_atomic_val_8_cas_cpt(ident_t *loc, int gtid,
                                             long long *x, long long e,
                                             long long d, long long *pv);
#ifdef __cplusplus
}
#endif

int main() {
  int ret = 0; // number of failed checks; doubles as the exit status
  bool r;
  // Naming per width (c=char, s=short, i=int, l=long long):
  //   *0 = non-matching expected value (forces a failed CAS),
  //   *1 = initial value, *2 = desired new value,
  //   *o = the atomically updated object,
  //   *c = value returned by the val_* call, *v = value captured via *pv.
  char c0 = 1;
  char c1 = 2;
  char c2 = 3;
  char co = 2;
  char cc = 0;
  char cv = 0;
  short s0 = 11;
  short s1 = 12;
  short s2 = 13;
  short so = 12;
  short sc = 0;
  short sv = 0;
  int i0 = 211;
  int i1 = 212;
  int i2 = 213;
  int io = 212;
  int ic = 0;
  int iv = 0;
  long long l0 = 3111;
  long long l1 = 3112;
  long long l2 = 3113;
  long long lo = 3112;
  long long lc = 0;
  long long lv = 0;

  // initialize OpenMP runtime library
  omp_set_dynamic(0);

  // #pragma omp atomic compare update capture
  // { r = x == e; if(r) { x = d; } else { v = x; } }

  // char, co == c1 initially, co == c2 finally
  r = __kmpc_atomic_bool_1_cas_cpt(NULL, 0, &co, c0, c2, &cv); // no-op
  if (co != c1) {
    ret++;
    printf("Error bool_1_cas_cpt no-op: %d != %d\n", co, c1);
  }
  if (cv != co) { // failed CAS must capture the old value
    ret++;
    printf("Error bool_1_cas_cpt no-op cpt: %d != %d\n", cv, co);
  }
  if (r) {
    ret++;
    printf("Error bool_1_cas_cpt no-op ret: %d\n", r);
  }
  cv = 0;
  r = __kmpc_atomic_bool_1_cas_cpt(NULL, 0, &co, c1, c2, &cv);
  if (co != c2) {
    ret++;
    printf("Error bool_1_cas_cpt: %d != %d\n", co, c2);
  }
  if (cv != 0) { // successful CAS leaves *pv untouched
    ret++;
    printf("Error bool_1_cas_cpt cpt: %d != %d\n", cv, 0);
  }
  if (!r) {
    ret++;
    printf("Error bool_1_cas_cpt ret: %d\n", r);
  }

  // short
  r = __kmpc_atomic_bool_2_cas_cpt(NULL, 0, &so, s0, s2, &sv); // no-op
  if (so != s1) {
    ret++;
    printf("Error bool_2_cas_cpt no-op: %d != %d\n", so, s1);
  }
  if (sv != so) {
    ret++;
    printf("Error bool_2_cas_cpt no-op cpt: %d != %d\n", sv, so);
  }
  if (r) {
    ret++;
    printf("Error bool_2_cas_cpt no-op ret: %d\n", r);
  }
  sv = 0;
  r = __kmpc_atomic_bool_2_cas_cpt(NULL, 0, &so, s1, s2, &sv);
  if (so != s2) {
    ret++;
    printf("Error bool_2_cas_cpt: %d != %d\n", so, s2);
  }
  if (sv != 0) {
    ret++;
    printf("Error bool_2_cas_cpt cpt: %d != %d\n", sv, 0);
  }
  if (!r) {
    ret++;
    printf("Error bool_2_cas_cpt ret: %d\n", r);
  }

  // int
  r = __kmpc_atomic_bool_4_cas_cpt(NULL, 0, &io, i0, i2, &iv); // no-op
  if (io != i1) {
    ret++;
    printf("Error bool_4_cas_cpt no-op: %d != %d\n", io, i1);
  }
  if (iv != io) {
    ret++;
    printf("Error bool_4_cas_cpt no-op cpt: %d != %d\n", iv, io);
  }
  if (r) {
    ret++;
    printf("Error bool_4_cas_cpt no-op ret: %d\n", r);
  }
  iv = 0;
  r = __kmpc_atomic_bool_4_cas_cpt(NULL, 0, &io, i1, i2, &iv);
  if (io != i2) {
    ret++;
    printf("Error bool_4_cas_cpt: %d != %d\n", io, i2);
  }
  if (iv != 0) {
    ret++;
    printf("Error bool_4_cas_cpt cpt: %d != %d\n", iv, 0);
  }
  if (!r) {
    ret++;
    printf("Error bool_4_cas_cpt ret: %d\n", r);
  }

  // long long
  r = __kmpc_atomic_bool_8_cas_cpt(NULL, 0, &lo, l0, l2, &lv); // no-op
  if (lo != l1) {
    ret++;
    printf("Error bool_8_cas_cpt no-op: %lld != %lld\n", lo, l1);
  }
  if (lv != lo) {
    ret++;
    printf("Error bool_8_cas_cpt no-op cpt: %lld != %lld\n", lv, lo);
  }
  if (r) {
    ret++;
    printf("Error bool_8_cas_cpt no-op ret: %d\n", r);
  }
  lv = 0;
  r = __kmpc_atomic_bool_8_cas_cpt(NULL, 0, &lo, l1, l2, &lv);
  if (lo != l2) {
    ret++;
    printf("Error bool_8_cas_cpt: %lld != %lld\n", lo, l2);
  }
  if (lv != 0) { // should not be assigned
    ret++;
    printf("Error bool_8_cas_cpt cpt: %lld != %d\n", lv, 0);
  }
  if (!r) {
    ret++;
    printf("Error bool_8_cas_cpt ret: %d\n", r);
  }

  // #pragma omp atomic compare update capture
  // { if (x == e) { x = d; }; v = x; }

  // char, co == c2 initially, co == c1 finally
  cc = __kmpc_atomic_val_1_cas_cpt(NULL, 0, &co, c0, c1, &cv); // no-op
  if (co != c2) {
    ret++;
    printf("Error val_1_cas_cpt no-op: %d != %d\n", co, c2);
  }
  if (cv != c2) { // val_* always captures the final value
    ret++;
    printf("Error val_1_cas_cpt no-op cpt: %d != %d\n", cv, c2);
  }
  if (cc != c2) { // and returns the value seen before the operation
    ret++;
    printf("Error val_1_cas_cpt no-op ret: %d != %d\n", cc, c2);
  }
  cc = __kmpc_atomic_val_1_cas_cpt(NULL, 0, &co, c2, c1, &cv);
  if (co != c1) {
    ret++;
    printf("Error val_1_cas_cpt: %d != %d\n", co, c1);
  }
  if (cv != c1) {
    ret++;
    printf("Error val_1_cas_cpt cpt: %d != %d\n", cv, c1);
  }
  if (cc != c2) {
    ret++;
    printf("Error val_1_cas_cpt ret: %d != %d\n", cc, c2);
  }

  // short
  sc = __kmpc_atomic_val_2_cas_cpt(NULL, 0, &so, s0, s1, &sv); // no-op
  if (so != s2) {
    ret++;
    printf("Error val_2_cas_cpt no-op: %d != %d\n", so, s2);
  }
  if (sv != s2) {
    ret++;
    printf("Error val_2_cas_cpt no-op cpt: %d != %d\n", sv, s2);
  }
  if (sc != s2) {
    ret++;
    printf("Error val_2_cas_cpt no-op ret: %d != %d\n", sc, s2);
  }
  sc = __kmpc_atomic_val_2_cas_cpt(NULL, 0, &so, s2, s1, &sv);
  if (so != s1) {
    ret++;
    printf("Error val_2_cas_cpt: %d != %d\n", so, s1);
  }
  if (sv != s1) {
    ret++;
    printf("Error val_2_cas_cpt cpt: %d != %d\n", sv, s1);
  }
  if (sc != s2) {
    ret++;
    printf("Error val_2_cas_cpt ret: %d != %d\n", sc, s2);
  }

  // int
  ic = __kmpc_atomic_val_4_cas_cpt(NULL, 0, &io, i0, i1, &iv); // no-op
  if (io != i2) {
    ret++;
    printf("Error val_4_cas_cpt no-op: %d != %d\n", io, i2);
  }
  if (iv != i2) {
    ret++;
    printf("Error val_4_cas_cpt no-op cpt: %d != %d\n", iv, i2);
  }
  if (ic != i2) {
    ret++;
    printf("Error val_4_cas_cpt no-op ret: %d != %d\n", ic, i2);
  }
  ic = __kmpc_atomic_val_4_cas_cpt(NULL, 0, &io, i2, i1, &iv);
  if (io != i1) {
    ret++;
    printf("Error val_4_cas_cpt: %d != %d\n", io, i1);
  }
  if (iv != i1) {
    ret++;
    // Fixed: this diagnostic previously printed `io` instead of the captured
    // value `iv` that the condition actually checks.
    printf("Error val_4_cas_cpt cpt: %d != %d\n", iv, i1);
  }
  if (ic != i2) {
    ret++;
    printf("Error val_4_cas_cpt ret: %d != %d\n", ic, i2);
  }

  // long long
  lc = __kmpc_atomic_val_8_cas_cpt(NULL, 0, &lo, l0, l1, &lv); // no-op
  if (lo != l2) {
    ret++;
    printf("Error val_8_cas_cpt no-op: %lld != %lld\n", lo, l2);
  }
  if (lv != l2) {
    ret++;
    printf("Error val_8_cas_cpt no-op cpt: %lld != %lld\n", lv, l2);
  }
  if (lc != l2) {
    ret++;
    printf("Error val_8_cas_cpt no-op ret: %lld != %lld\n", lc, l2);
  }
  lc = __kmpc_atomic_val_8_cas_cpt(NULL, 0, &lo, l2, l1, &lv);
  if (lo != l1) {
    ret++;
    printf("Error val_8_cas_cpt: %lld != %lld\n", lo, l1);
  }
  if (lv != l1) {
    ret++;
    printf("Error val_8_cas_cpt cpt: %lld != %lld\n", lv, l1);
  }
  if (lc != l2) {
    ret++;
    printf("Error val_8_cas_cpt ret: %lld != %lld\n", lc, l2);
  }

  // check in parallel: two threads hand values back and forth via CAS,
  // incrementing i0 and i1 in lock step five times.
  i0 = 1;
  i1 = 0;
  for (io = 0; io < 5; ++io) {
#pragma omp parallel num_threads(2) private(i2, ic, r, iv)
    {
      if (omp_get_thread_num() == 0) {
        // th0 waits for th1 to increment i1, then th0 increments i0
#pragma omp atomic read
        i2 = i1;
        ic = __kmpc_atomic_val_4_cas_cpt(NULL, 0, &i0, i2, i2 + 1, &iv);
        while (ic != i2) {
          if (iv != ic) {
            ret++;
            printf("Error 1 in parallel cpt, %d != %d\n", iv, ic);
          }
#pragma omp atomic read
          i2 = i1;
          ic = __kmpc_atomic_val_4_cas_cpt(NULL, 0, &i0, i2, i2 + 1, &iv);
        }
        if (iv != i2 + 1) {
          ret++;
          printf("Error 2 in parallel cpt, %d != %d\n", iv, i2 + 1);
        }
      } else {
        // th1 increments i1 if it is equal to i0 - 1, letting th0 to proceed
        r = 0;
        while (!r) {
#pragma omp atomic read
          i2 = i0;
          r = __kmpc_atomic_bool_4_cas_cpt(NULL, 0, &i1, i2 - 1, i2, &iv);
        }
      }
    }
  }
  if (i0 != 6 || i1 != 5) {
    ret++;
    printf("Error in parallel, %d != %d or %d != %d\n", i0, 6, i1, 5);
  }

  if (ret == 0)
    printf("passed\n");
  return ret;
}
cpu_ctc.h
#pragma once

#include <tuple>
#include <cmath>
#include <limits>
#include <algorithm>
#include <numeric>

#if !defined(CTC_DISABLE_OMP) && !defined(APPLE)
#include <omp.h>
#endif

#include "ctc_helper.h"

// CPU implementation of the CTC (Connectionist Temporal Classification)
// loss and its gradient via the forward-backward algorithm in log space.
//
// Layout conventions (T = time steps, B = minibatch size, V = alphabet size
// including blank):
//   activations / probs / grads : (T, B, V), time-major
//   flat_labels                 : the B label sequences concatenated
// All per-example scratch memory is carved out of the caller-provided
// `workspace` block; nothing here owns heap memory.
template<typename ProbT>
class CpuCTC {
public:
    // Noncopyable
    // alphabet_size : vocab size V (including blank)
    // minibatch     : batch size B
    // workspace     : caller-owned scratch memory block
    // num_threads   : OpenMP thread count; <= 0 means use omp_get_max_threads()
    // blank_label   : blank id, default 0 by convention
    CpuCTC(int alphabet_size, int minibatch, void* workspace, int num_threads,
           int blank_label) :
            alphabet_size_(alphabet_size), minibatch_(minibatch),
            num_threads_(num_threads), workspace_(workspace),
            blank_label_(blank_label) {
#if defined(CTC_DISABLE_OMP) || defined(APPLE)
#else
        if (num_threads > 0) {
            omp_set_num_threads(num_threads);
        } else {
            num_threads_ = omp_get_max_threads();
        }
#endif
    };

    // non-copy and non-assignment (the object aliases external workspace)
    CpuCTC(const CpuCTC&) = delete;
    CpuCTC& operator=(const CpuCTC&) = delete;

    // Computes per-example negative log-likelihood into `costs` and the
    // gradient w.r.t. the unnormalized activations into `grads`.
    ctcStatus_t cost_and_grad(const ProbT* const activations, // logits, (T, B, V)
                              ProbT *grads,                   // out, (T, B, V)
                              ProbT* costs,                   // out, (B,)
                              const int* const flat_labels,   // L1+...+LB = sum(label_lengths)
                              const int* const label_lengths, // (B,)
                              const int* const input_lengths);// (B,)

    // Forward-only scoring (alpha pass, no gradient); definition not in this
    // region of the file.
    ctcStatus_t score_forward(const ProbT* const activations, // logits, (T, B, V)
                              ProbT* costs,                   // out, (B,)
                              const int* const flat_labels,
                              const int* const label_lengths,
                              const int* const input_lengths);

private:
    // Per-example scratch views into the shared workspace, plus the
    // blank-expanded label and the boundary-step tables.
    class CpuCTC_metadata {
    private:
        // Fills labels_w_blanks / s_inc / e_inc; returns the repeat count.
        int setup_labels(const int* const labels, int blank_label, int L, int S);

    public:
        // mb = example index within the minibatch (batch_size B)
        // L  = |l| (label length), S = 2|l| + 1, T = time steps
        // alphabet_size = V, blank_label = blank id, labels = l
        // bytes_used = byte offset of this example's region inside workspace
        CpuCTC_metadata(int L, int S, int T, int mb, int alphabet_size,
                        void* workspace, size_t bytes_used, int blank_label,
                        const int* const labels);

        ProbT* alphas;        // forward log-probs, (T, S)
        ProbT* betas;         // backward log-probs, single column, (S,)
        int* labels_w_blanks; // label expanded with blanks, 2L + 1 = S, (S,)
        int* e_inc;           // max step of the `end` boundary in S space, (S,)
        int* s_inc;           // max step of the `start` boundary in S space, (S,)
        ProbT* output;        // per-symbol alpha*beta accumulator, (V,)
        int repeats;          // number of adjacent equal tokens in label l
    }; // CpuCTC_metadata

    int alphabet_size_; // Number of characters plus blank = V
    int minibatch_;     // B
    int num_threads_;
    int blank_label_;   // blank id
    void* workspace_;   // caller-owned scratch block

    // Stable softmax over the V axis: activations -> probs, (T, B, V).
    void softmax(const ProbT* const activations, ProbT* probs,
                 const int* const input_lengths);

    // Loss + gradient for one example; returns (nll, forward/backward
    // disagreement flag).
    std::tuple<ProbT, bool> cost_and_grad_kernel(ProbT *grad,
                                                 const ProbT* const probs,
                                                 const int* const labels,
                                                 int T, int L, int mb,
                                                 size_t bytes_used);

    // Forward (alpha) pass; returns the forward log-likelihood.
    ProbT compute_alphas(const ProbT* probs, int repeats, int S, int T,
                         const int* const e_inc, const int* const s_inc,
                         const int* const labels, ProbT* alphas);

    // Backward (beta) pass fused with the gradient update; returns the
    // backward log-likelihood.
    ProbT compute_betas_and_grad(ProbT* grad, const ProbT* const probs,
                                 ProbT log_partition, int repeats, int S, int T,
                                 const int* const e_inc, const int* const s_inc,
                                 const int* const labels,
                                 ProbT* alphas, ProbT* betas, ProbT* output);
};

template<typename ProbT>
CpuCTC<ProbT>::CpuCTC_metadata::CpuCTC_metadata(int L, int S, int T, int mb,
                                                int alphabet_size,
                                                void* workspace,
                                                size_t bytes_used,
                                                int blank_label,
                                                const int* const labels) {
    // bytes_used = running offset into workspace; each buffer is carved off
    // sequentially from this example's region.

    // 1. alphas, (T, S), filled with -inf (probs kept in the log domain)
    alphas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(ProbT) * S * T;
    std::fill(alphas, alphas + S * T, ctc_helper::neg_inf<ProbT>());

    // 2. betas, (S,), filled with -inf (log domain)
    betas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(ProbT) * S;
    std::fill(betas, betas + S, ctc_helper::neg_inf<ProbT>());

    // 3. labels_w_blanks, (S,)
    labels_w_blanks = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(int) * S;

    // 4. e (end-boundary) max increase steps, (S,)
    e_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(int) * S;

    // 5. s (start-boundary) max increase steps, (S,)
    s_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(int) * S;

    // 6. output, (V,)
    output = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(ProbT) * alphabet_size;

    // 7. repeats = count of adjacent equal characters in l (label l is
    //    expanded to l' with blanks interleaved)
    repeats = setup_labels(labels, blank_label, L, S);
}

template<typename ProbT>
int CpuCTC<ProbT>::CpuCTC_metadata::setup_labels(const int* const labels,
                                                 int blank_label, int L, int S) {
    // Example: label l = abbc
    //   s_inc = 1 2 1 1 ...   e_inc = 2 1 1 ... 1
    //   l' (labels_w_blanks, length S) = -a-b-b-c-
    // repeats = number of adjacent equal characters, e.g. abbc -> 1, abbbc -> 2.
    // A valid alignment needs at least L + repeats time steps.
    //
    // s_counter / e_counter index into s_inc / e_inc, the per-position bounds
    // on how far the active [start, end) window may advance in l' space
    // (the two boundary conditions of the trellis; see Fig. 3 of the CTC
    // paper, top-right and bottom-left corners).
    int e_counter = 0;
    int s_counter = 0;

    s_inc[s_counter++] = 1; // s_inc[0] = 1: may start from the leading blank

    int repeats = 0; // count of adjacent equal characters
    for (int i = 1; i < L; ++i) {
        if (labels[i-1] == labels[i]) {
            // Repeated label: max step is 1 twice — the path must pass
            // through the separating blank before reaching the next label.
            s_inc[s_counter++] = 1;
            s_inc[s_counter++] = 1;
            e_inc[e_counter++] = 1;
            e_inc[e_counter++] = 1;
            ++repeats;
        } else {
            // Distinct label: max step is 2 — may go through the blank or
            // skip straight to the next label.
            s_inc[s_counter++] = 2;
            e_inc[e_counter++] = 2;
        }
    }
    e_inc[e_counter++] = 1; // final step onto the trailing blank

    // Interleave blanks: l' = (blank, l[0], blank, l[1], ..., blank)
    for (int i = 0; i < L; ++i) {
        labels_w_blanks[2 * i] = blank_label;
        labels_w_blanks[2 * i + 1] = labels[i];
    }
    labels_w_blanks[S - 1] = blank_label; // last position is blank

    return repeats;
}

// Numerically stable softmax over the alphabet axis:
//   shiftx = x - max(x); exps = exp(shiftx); probs = exps / sum(exps)
// Probabilities are clamped from below to the smallest positive normal value
// so that later std::log calls never see 0.
template<typename ProbT>
void CpuCTC<ProbT>::softmax(const ProbT* const activations /*(T, B, V)*/,
                            ProbT* probs /*out*/,
                            const int* const input_lengths /*(B,)*/) {
    ProbT min_T = std::numeric_limits<ProbT>::min();

    // minibatch_ = B (index mb), input_lengths[mb] bounds the time index c,
    // alphabet_size_ = V (index r); activations is (T, B, V) time-major.
#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        for(int c = 0; c < input_lengths[mb]; ++c) {
            // offset of the (c, mb, :) row
            int col_offset = (c * minibatch_ + mb) * alphabet_size_;

            ProbT max_activation = -std::numeric_limits<ProbT>::infinity();
            for(int r = 0; r < alphabet_size_; ++r)
                max_activation = std::max(max_activation,
                                          activations[col_offset + r]);

            ProbT denom = ProbT(0.);
            for(int r = 0; r < alphabet_size_; ++r) {
                probs[col_offset + r] =
                        std::exp(activations[col_offset + r] - max_activation);
                denom += probs[col_offset + r];
            }

            for(int r = 0; r < alphabet_size_; ++r) {
                probs[col_offset + r] /= denom;
                // clamp away exact zeros for the log-domain recursions
                if (probs[col_offset + r] < min_T) {
                    probs[col_offset + r] = min_T;
                }
            }
        }
    }
}

// Loss and gradient for a single example.
// grad and probs both point at this example's (T, 1, V) slices; bytes_used is
// the byte offset of this example's scratch region in the workspace.
// Returns (negative forward log-likelihood, true iff the forward and backward
// log-likelihoods disagree by more than ctc_helper::threshold).
template<typename ProbT>
std::tuple<ProbT, bool>
CpuCTC<ProbT>::cost_and_grad_kernel(ProbT *grad, const ProbT* const probs,
                                    const int* const labels,
                                    int T, int L, int mb, size_t bytes_used) {
    const int S = 2*L + 1; // Number of labels with blanks

    CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_, bytes_used,
                         blank_label_, labels);

    bool over_threshold = false;

    // A valid example is one for which L + repeats <= T: the expanded label
    // (with the mandatory blanks between repeated characters) must fit in
    // the available time steps.
    if (L + ctcm.repeats > T) {
        return std::make_tuple(ProbT(0), over_threshold); // TODO, not right to return 0
    }

    // forward log-likelihood (alpha pass)
    ProbT llForward = compute_alphas(probs, ctcm.repeats, S, T,
                                     ctcm.e_inc, ctcm.s_inc,
                                     ctcm.labels_w_blanks, ctcm.alphas);

    // backward log-likelihood (beta pass) fused with the gradient update
    ProbT llBackward = compute_betas_and_grad(grad, probs, llForward,
                                              ctcm.repeats, S, T,
                                              ctcm.e_inc, ctcm.s_inc,
                                              ctcm.labels_w_blanks,
                                              ctcm.alphas, ctcm.betas,
                                              ctcm.output);

    // forward/backward disagreement beyond the threshold flags the example
    ProbT diff = std::abs(llForward - llBackward);
    if (diff > ctc_helper::threshold) {
        over_threshold = true;
    }

    return std::make_tuple(-llForward, over_threshold);
}

// Computes forward probabilities for one example (the alpha recursion).
// probs points at this example's slice of the (T, B, V) prob array; alphas is
// (T, S); label_w_blank / e_inc / s_inc are (S,). Returns the forward
// log-likelihood. The [start, end) window tracks which positions of l' are
// reachable at each t, advanced by the precomputed s_inc / e_inc bounds.
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_alphas(const ProbT* probs, int repeats, int S, int T,
                                    const int* const e_inc,
                                    const int* const s_inc,
                                    const int* const label_w_blank,
                                    ProbT* alphas) {
    // S/2 = L; repeats = mandatory extra blanks between repeated labels.
    // start = 0 when L + repeats < T (the path may begin on the blank);
    // start = 1 when L + repeats == T (no slack; must begin on l'_1).
    // L + repeats > T was already rejected by cost_and_grad_kernel.
    // end = 2 when S > 1 (L != 0); end = 1 when S <= 1 (empty label).
    // `end` is a sentinel: loops run with i < end.
    int start = (((S / 2) + repeats - T) < 0) ? 0 : 1,
        end = S > 1 ? 2 : 1;

    // alphas/betas were initialized to -inf in CpuCTC_metadata.
    // Initialize alpha_1(1) and alpha_1(2).
    for (int i = start; i < end; ++i) {
        alphas[i] = std::log(probs[label_w_blank[i]]);
    }

    // alpha_2 .. alpha_T
    for(int t = 1; t < T; ++t) {
        int remain = (S / 2) + repeats - (T - t);
        if(remain >= 0) {
            // Too few time steps remain for the window to linger: the start
            // boundary must advance (no horizontal-only moves), otherwise no
            // path could still cover every symbol of l'.
            start += s_inc[remain];
        }
        if(t <= (S / 2) + repeats) {
            // Early on (t <= L + repeats) the end boundary keeps advancing,
            // opening up new reachable positions of l'.
            end += e_inc[t - 1];
        }
        // idx1: row t of alphas; idx2: row t-1; idx3: row t of the global probs
        int startloop = start;
        int idx1 = t * S, idx2 = (t - 1) * S,
            idx3 = t * (minibatch_ * alphabet_size_);

        if (start == 0) {
            // Position s = 0 (leading blank) only has the horizontal move.
            alphas[idx1] = alphas[idx2] + std::log(probs[idx3 + blank_label_]);
            // the general recursion below starts at s = 1
            startloop += 1;
        }

        for(int i = startloop; i < end; ++i) {
            // prev_sum = logadd(alpha_{t-1}(i), alpha_{t-1}(i-1))
            ProbT prev_sum = ctc_helper::log_plus<ProbT>()(alphas[idx2 + i],
                                                           alphas[idx2 + (i-1)]);

            // The two-step skip is allowed only when not on a blank and not
            // on a repeated label (and i >= 2, since i == 1 can only come
            // from one step back).
            if (label_w_blank[i] != blank_label_ && i != 1 &&
                label_w_blank[i] != label_w_blank[i-2]) {
                prev_sum = ctc_helper::log_plus<ProbT>()(prev_sum,
                                                         alphas[idx2 + (i-2)]);
            }

            // alpha_t(i) = prev_sum + log p_t(l'_i)
            alphas[idx1 + i] = prev_sum + std::log(probs[idx3 + label_w_blank[i]]);
        }
    }

    // log-likelihood = logadd(alpha_T(|l'| - 1), alpha_T(|l'|))
    ProbT loglikelihood = ctc_helper::neg_inf<ProbT>();
    for(int i = start; i < end; ++i) {
        loglikelihood = ctc_helper::log_plus<ProbT>()(loglikelihood,
                                                      alphas[(T - 1) * S + i]);
    }

    return loglikelihood;
}

// Starting from T, we sweep backward over the alpha array computing one column
// of betas as we go. At each position we can update product alpha * beta and then
// sum into the gradient associated with each label.
// NOTE computes gradient w.r.t UNNORMALIZED final layer activations (i.e. logits).
// Assumes the passed-in grads are already zeroed!
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_betas_and_grad(ProbT* grad,
                                            const ProbT* const probs,
                                            ProbT log_partition, int repeats,
                                            int S, int T,
                                            const int* const e_inc,
                                            const int* const s_inc,
                                            const int* const labels_w_blanks,
                                            ProbT* alphas, ProbT* betas,
                                            ProbT* output) {
    // grad / probs point at this example's (T, 1, V) slices; grad is zeroed.
    // log_partition = forward log-likelihood from compute_alphas.
    // output (V,) accumulates sum_{s in lab(l', k)} alpha_t(s) * beta_t(s)
    // per symbol k; betas holds a single (S,) column.
    //
    // Backward active window [start, end), end being a sentinel:
    // start = S - 2 when S > 1, else 0; end = S unless the path is so tight
    // (T == L + repeats) that the trailing blank is unreachable, then S - 1.
    int start = S > 1 ? (S - 2) : 0,
        end = (T > (S / 2) + repeats) ? S : S-1;

    // reset the per-symbol accumulator to -inf
    std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());

    // Set the starting values in the beta column at the very right edge
    // (t = T - 1) and fold them into alphas and output.
    for (int i = start; i < end; ++i) {
        betas[i] = std::log(probs[(T - 1) * (minibatch_ * alphabet_size_)
                                  + labels_w_blanks[i]]);

        // compute alpha * beta in log space at this position in (S, T) space
        alphas[(T - 1) * S + i] += betas[i];

        // update the gradient associated with this label —
        // essentially performing a reduce-by-key in a sequential manner
        output[labels_w_blanks[i]] =
                ctc_helper::log_plus<ProbT>()(output[labels_w_blanks[i]],
                                              alphas[(T - 1) * S + i]);
    }

    // update the gradient w.r.t. each unique label at t = T - 1
    for (int i = 0; i < alphabet_size_; ++i) {
        int idx3 = (T - 1) * alphabet_size_ * minibatch_ + i;

        if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
            probs[idx3] == 0.0) {
            grad[idx3] = probs[idx3];
        } else {
            grad[idx3] = probs[idx3]
                    - std::exp(output[i] - std::log(probs[idx3]) - log_partition);
        }
    }

    // loop from the second-to-last column all the way to the left
    for(int t = T - 2; t >= 0; --t) {
        int remain = (S / 2) + repeats - (T - t);
        if(remain >= -1) start -= s_inc[remain + 1];
        if(t < (S / 2) + repeats) end -= e_inc[t];

        // the trailing blank row (S - 1) is handled separately below
        int endloop = end == S ? end - 1 : end;
        int idx1 = t * S, idx3 = t * (minibatch_ * alphabet_size_);

        std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());

        // rows <= S - 2
        for(int i = start; i < endloop; ++i) {
            ProbT next_sum = ctc_helper::log_plus<ProbT>()(betas[i], betas[(i+1)]);

            // Skip two if not on blank and not on repeat.
            if (labels_w_blanks[i] != blank_label_ && i != (S-2) &&
                labels_w_blanks[i] != labels_w_blanks[i+2]){
                next_sum = ctc_helper::log_plus<ProbT>()(next_sum, betas[(i+2)]);
            }
            betas[i] = next_sum + std::log(probs[idx3 + labels_w_blanks[i]]);

            // compute alpha * beta in log space
            alphas[i + idx1] += betas[i];

            // update the gradient associated with this label
            output[labels_w_blanks[i]] =
                    ctc_helper::log_plus<ProbT>()(output[labels_w_blanks[i]],
                                                  alphas[idx1 + i]);
        }

        // row S - 1 (trailing blank): only the horizontal move exists
        if (end == S) {
            betas[(S-1)] = betas[(S-1)] + std::log(probs[idx3 + blank_label_]);
            alphas[idx1 + (S-1)] += betas[(S-1)];

            output[labels_w_blanks[S-1]] =
                    ctc_helper::log_plus<ProbT>()(output[labels_w_blanks[S-1]],
                                                  alphas[idx1 + (S-1)]);
        }

        // go over the unique labels and compute the final grad
        // w.r.t. each one at this time step
        for (int i = 0; i < alphabet_size_; ++i) {
            // NOTE(review): the first disjunct tests output[i] (a log-prob)
            // but the third tests probs[idx3] before idx3 has advanced to
            // column i on later iterations only via ++idx3 below — this
            // mirrors the reference implementation; verify against upstream
            // before changing.
            if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
                probs[idx3] == 0.0) {
                grad[idx3] = probs[idx3];
            } else {
                grad[idx3] = probs[idx3]
                        - std::exp(output[i] - std::log(probs[idx3]) - log_partition);
            }
            ++idx3;
        }
    }

    // log-likelihood of the backward pass
    ProbT loglikelihood = ctc_helper::neg_inf<ProbT>();
    for(int i = start; i < end; ++i) {
        loglikelihood = ctc_helper::log_plus<ProbT>()(loglikelihood, betas[i]);
    }

    return loglikelihood;
}

// Batch entry point: softmax over the activations, then the per-example
// forward-backward kernel in parallel over the minibatch.
//   activations (T, B, V), grads (T, B, V), costs (B,),
//   input_lengths / label_lengths (B,), flat_labels concatenated.
// The workspace begins with the (T, B, V) probs buffer, followed by one
// per-example scratch region of per_minibatch_bytes each.
template<typename ProbT>
ctcStatus_t CpuCTC<ProbT>::cost_and_grad(const ProbT* const activations,
                                         ProbT *grads,
                                         ProbT *costs,
                                         const int* const flat_labels,
                                         const int* const label_lengths,
                                         const int* const input_lengths) {
    if (activations == nullptr ||
        grads == nullptr ||
        costs == nullptr ||
        flat_labels == nullptr ||
        label_lengths == nullptr ||
        input_lengths == nullptr
        )
        return CTC_STATUS_INVALID_VALUE;

    // first region of the workspace holds probs, (T, B, V)
    ProbT* probs = static_cast<ProbT *>(workspace_);

    int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);

    size_t bytes_used = sizeof(ProbT) * maxT * minibatch_ * alphabet_size_;

    // per minibatch memory
    size_t per_minibatch_bytes = 0;

    int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);
    int maxS = 2 * maxL + 1;

    // NOTE(review): the sizes below use sizeof(float) while CpuCTC_metadata
    // carves the same buffers with sizeof(ProbT); if ProbT is double the
    // per-example regions would overlap. Verify against the matching
    // workspace-size computation before changing.

    // output, (V,)
    per_minibatch_bytes += sizeof(float) * alphabet_size_;

    // alphas, (T, S)
    per_minibatch_bytes += sizeof(float) * maxS * maxT;

    // betas, (S,)
    per_minibatch_bytes += sizeof(float) * maxS;

    // labels w/blanks, e_inc, s_inc, each (S,)
    per_minibatch_bytes += 3 * sizeof(int) * maxS;

    // batch softmax
    softmax(activations, probs, input_lengths);

    // compute loss and grad example by example
#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb]; // Length of utterance (time)
        const int L = label_lengths[mb]; // Number of labels in transcription

        bool mb_status;

        std::tie(costs[mb], mb_status) =
                cost_and_grad_kernel(grads + mb * alphabet_size_, /*(T, 1, V)*/
                                     probs + mb * alphabet_size_, /*(T, 1, V)*/
                                     flat_labels + std::accumulate(label_lengths,
                                                                   label_lengths + mb, 0),
                                     T, L, mb,
                                     bytes_used + mb * per_minibatch_bytes);
    }

    return
CTC_STATUS_SUCCESS; } // compute forward log-likelihood template<typename ProbT> ctcStatus_t CpuCTC<ProbT>::score_forward(const ProbT* const activations, ProbT* costs, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths) { // activations = logits, (T, B, V) // cost, (B,) // input_lengths, (B,) // label_lengths, (B,) // minibatch_ = B // grads, (T, B, V) // probs, (T, B, V) if (activations == nullptr || costs == nullptr || flat_labels == nullptr || label_lengths == nullptr || input_lengths == nullptr ) return CTC_STATUS_INVALID_VALUE; // probs is first element of workspace ProbT* probs = static_cast<ProbT *>(workspace_); int maxT = *std::max_element(input_lengths, input_lengths + minibatch_); size_t bytes_used = sizeof(ProbT) * maxT * minibatch_ * alphabet_size_; //per minibatch memory size_t per_minibatch_bytes = 0; int maxL = *std::max_element(label_lengths, label_lengths + minibatch_); int maxS = 2 * maxL + 1; //output, sum_{s \in lab(z,k)} \alpha_t(s) \beta_t(s) per_minibatch_bytes += sizeof(float) * alphabet_size_; //alphas per_minibatch_bytes += sizeof(float) * maxT * maxS; //betas per_minibatch_bytes += sizeof(float) * maxS; //labels w/blanks, e_inc, s_inc per_minibatch_bytes += 3 * sizeof(int) * maxS; // batch softmax softmax(activations, probs, input_lengths); #pragma omp parallel for for (int mb = 0; mb < minibatch_; ++mb) { const int T = input_lengths[mb]; // Length of utterance (time) const int L = label_lengths[mb]; // Number of labels in transcription const int S = 2*L + 1; // Number of labels with blanks CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_, bytes_used + mb * per_minibatch_bytes, blank_label_, flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0)); if (L + ctcm.repeats > T) // \bar{l} > T, invalid, set loss to zero costs[mb] = ProbT(0); else { // nll of forward as loss costs[mb] = -compute_alphas(probs + mb * alphabet_size_, ctcm.repeats, S, T, ctcm.e_inc, ctcm.s_inc, 
ctcm.labels_w_blanks, ctcm.alphas); } } return CTC_STATUS_SUCCESS; }
Example_nthrs_nesting.1.c
/*
 * @@name:       nthrs_nesting.1c
 * @@type:       C
 * @@compilable: yes
 * @@linkable:   yes
 * @@expect:     success
 */
#include <stdio.h>
#include <omp.h>

/*
 * Demonstrates how the nest-var ICV (omp_set_nested) controls whether an
 * inner parallel region forks its own team or is serialized.  Run with
 * OMP_NUM_THREADS=2,3 to see the effect described in the comments below.
 */
int main (void)
{
  omp_set_nested(1);   /* enable nested parallelism */
  omp_set_dynamic(0);  /* fix team sizes so OMP_NUM_THREADS is honored */

  #pragma omp parallel
  {
    /*
     * Nesting is enabled here: with OMP_NUM_THREADS=2,3 each of the two
     * outer threads forks an inner team of three, so this prints
     *   Inner: num_thds=3
     *   Inner: num_thds=3
     * If the implementation does not support nesting, the inner regions
     * are serialized and "Inner: num_thds=1" is printed twice instead.
     */
    #pragma omp parallel
    {
      #pragma omp single
      printf ("Inner: num_thds=%d\n", omp_get_num_threads());
    }

    #pragma omp barrier

    /*
     * Nesting is now disabled, so even with OMP_NUM_THREADS=2,3 each
     * inner region runs with a single thread and prints
     *   Inner: num_thds=1
     *   Inner: num_thds=1
     */
    omp_set_nested(0);
    #pragma omp parallel
    {
      #pragma omp single
      printf ("Inner: num_thds=%d\n", omp_get_num_threads());
    }

    #pragma omp barrier

    /*
     * Size of the outer team: with OMP_NUM_THREADS=2,3 this prints
     *   Outer: num_thds=2
     */
    #pragma omp single
    printf ("Outer: num_thds=%d\n", omp_get_num_threads());
  }
  return 0;
}
enhance.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE N N H H AAA N N CCCC EEEEE % % E NN N H H A A NN N C E % % EEE N N N HHHHH AAAAA N N N C EEE % % E N NN H H A A N NN C E % % EEEEE N N H H A A N N CCCC EEEEE % % % % % % MagickCore Image Enhancement Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoGammaImage() extract the 'mean' from the image and adjust the image % to try make set its gamma appropriately. 
%
% The format of the AutoGammaImage method is:
%
%      MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: The image to auto-level
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image,
  ExceptionInfo *exception)
{
  double
    gamma,
    log_mean,
    mean,
    sans;

  MagickStatusType
    status;

  register ssize_t
    i;

  /*
    Gamma is chosen so that the image mean maps to mid-gray:
    gamma = log(mean*QuantumScale)/log(0.5).
  */
  log_mean=log(0.5);
  if (image->channel_mask == DefaultChannels)
    {
      /*
        Apply gamma correction equally across all given channels.
      */
      (void) GetImageMean(image,&mean,&sans,exception);
      /* NOTE(review): a mean of 0 yields a non-finite gamma here --
         confirm callers guard against fully-black images */
      gamma=log(mean*QuantumScale)/log_mean;
      return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception));
    }
  /*
    Auto-gamma each channel separately: temporarily narrow the channel
    mask to the single channel, level it, then restore the caller's mask.
  */
  status=MagickTrue;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    ChannelType
      channel_mask;

    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits & UpdatePixelTrait) == 0)
      continue;  /* channel not selected for update by the current mask */
    channel_mask=SetImageChannelMask(image,(ChannelType) (1UL << i));
    status=GetImageMean(image,&mean,&sans,exception);
    gamma=log(mean*QuantumScale)/log_mean;
    status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception);
    (void) SetImageChannelMask(image,channel_mask);  /* always restore mask */
    if (status == MagickFalse)
      break;
  }
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o L e v e l I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoLevelImage() adjusts the levels of a particular image channel by
%  scaling the minimum and maximum values to the full quantum range.
% % The format of the LevelImage method is: % % MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-level % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType AutoLevelImage(Image *image, ExceptionInfo *exception) { return(MinMaxStretchImage(image,0.0,0.0,1.0,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B r i g h t n e s s C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BrightnessContrastImage() changes the brightness and/or contrast of an % image. It converts the brightness and contrast parameters into slope and % intercept and calls a polynomical function to apply to the image. % % The format of the BrightnessContrastImage method is: % % MagickBooleanType BrightnessContrastImage(Image *image, % const double brightness,const double contrast,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o brightness: the brightness percent (-100 .. 100). % % o contrast: the contrast percent (-100 .. 100). % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType BrightnessContrastImage(Image *image, const double brightness,const double contrast,ExceptionInfo *exception) { #define BrightnessContastImageTag "BrightnessContast/Image" double alpha, coefficients[2], intercept, slope; MagickBooleanType status; /* Compute slope and intercept. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); alpha=contrast; slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0)); if (slope < 0.0) slope=0.0; intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope); coefficients[0]=slope; coefficients[1]=intercept; status=FunctionImage(image,PolynomialFunction,2,coefficients,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C L A H E I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CLAHEImage() is a variant of adaptive histogram equalization in which the % contrast amplification is limited, so as to reduce this problem of noise % amplification. % % Adapted from implementation by Karel Zuiderveld, karel@cv.ruu.nl in % "Graphics Gems IV", Academic Press, 1994. % % The format of the CLAHEImage method is: % % MagickBooleanType CLAHEImage(Image *image,const size_t width, % const size_t height,const size_t number_bins,const double clip_limit, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the tile divisions to use in horizontal direction. % % o height: the height of the tile divisions to use in vertical direction. % % o number_bins: number of bins for histogram ("dynamic range"). % % o clip_limit: contrast limit for localised changes in contrast. A limit % less than 1 results in standard non-contrast limited AHE. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  Inclusive gray-intensity range used to build the CLAHE lookup table.
*/
typedef struct _RangeInfo
{
  unsigned short
    min,
    max;
} RangeInfo;

/*
  ClipCLAHEHistogram() limits each histogram bin to clip_limit and
  redistributes the clipped excess uniformly over all number_bins bins,
  which bounds the slope of the equalization mapping (the "contrast
  limited" part of CLAHE).  Adapted from Zuiderveld, "Graphics Gems IV".
*/
static void ClipCLAHEHistogram(const double clip_limit,const size_t number_bins,
  size_t *histogram)
{
#define NumberCLAHEGrays  (65536)

  register ssize_t
    i;

  size_t
    cumulative_excess,
    previous_excess,
    step;

  ssize_t
    excess;

  /*
    Compute total number of excess pixels.
  */
  cumulative_excess=0;
  for (i=0; i < (ssize_t) number_bins; i++)
  {
    excess=(ssize_t) histogram[i]-(ssize_t) clip_limit;
    if (excess > 0)
      cumulative_excess+=excess;
  }
  /*
    Clip histogram and redistribute excess pixels across all bins.
    Bins with headroom of at least `step` absorb a full share; bins
    closer to the limit are topped up to exactly clip_limit.
  */
  step=cumulative_excess/number_bins;
  excess=(ssize_t) (clip_limit-step);
  for (i=0; i < (ssize_t) number_bins; i++)
  {
    if ((double) histogram[i] > clip_limit)
      histogram[i]=(size_t) clip_limit;
    else
      if ((ssize_t) histogram[i] > excess)
        {
          cumulative_excess-=histogram[i]-excess;
          histogram[i]=(size_t) clip_limit;
        }
      else
        {
          cumulative_excess-=step;
          histogram[i]+=step;
        }
  }
  /*
    Redistribute remaining excess one pixel at a time over bins still
    below the limit; stop when the excess is gone or a full pass makes
    no progress (every remaining bin is at the limit).
  */
  do
  {
    register size_t
      *p;

    size_t
      *q;

    previous_excess=cumulative_excess;
    p=histogram;
    q=histogram+number_bins;
    while ((cumulative_excess != 0) && (p < q))
    {
      /* widen the stride as the excess shrinks so drops stay uniform */
      step=number_bins/cumulative_excess;
      if (step < 1)
        step=1;
      for (p=histogram; (p < q) && (cumulative_excess != 0); p+=step)
        if ((double) *p < clip_limit)
          {
            (*p)++;
            cumulative_excess--;
          }
      p++;
    }
  } while ((cumulative_excess != 0) && (cumulative_excess < previous_excess));
}

static void GenerateCLAHEHistogram(const RectangleInfo *clahe_info,
  const RectangleInfo *tile_info,const size_t number_bins,
  const unsigned short *lut,const unsigned short *pixels,size_t *histogram)
{
  register const unsigned short
    *p;

  register ssize_t
    i;

  /*
    Classify the pixels into a gray histogram.
*/ for (i=0; i < (ssize_t) number_bins; i++) histogram[i]=0L; p=pixels; for (i=0; i < (ssize_t) tile_info->height; i++) { const unsigned short *q; q=p+tile_info->width; while (p < q) histogram[lut[*p++]]++; q+=clahe_info->width; p=q-tile_info->width; } } static void InterpolateCLAHE(const RectangleInfo *clahe_info,const size_t *Q12, const size_t *Q22,const size_t *Q11,const size_t *Q21, const RectangleInfo *tile,const unsigned short *lut,unsigned short *pixels) { ssize_t y; unsigned short intensity; /* Bilinear interpolate four tiles to eliminate boundary artifacts. */ for (y=(ssize_t) tile->height; y > 0; y--) { register ssize_t x; for (x=(ssize_t) tile->width; x > 0; x--) { intensity=lut[*pixels]; *pixels++=(unsigned short) (PerceptibleReciprocal((double) tile->width* tile->height)*(y*((double) x*Q12[intensity]+(tile->width-x)* Q22[intensity])+(tile->height-y)*((double) x*Q11[intensity]+ (tile->width-x)*Q21[intensity]))); } pixels+=(clahe_info->width-tile->width); } } static void GenerateCLAHELut(const RangeInfo *range_info, const size_t number_bins,unsigned short *lut) { ssize_t i; unsigned short delta; /* Scale input image [intensity min,max] to [0,number_bins-1]. */ delta=(unsigned short) ((range_info->max-range_info->min)/number_bins+1); for (i=(ssize_t) range_info->min; i <= (ssize_t) range_info->max; i++) lut[i]=(unsigned short) ((i-range_info->min)/delta); } static void MapCLAHEHistogram(const RangeInfo *range_info, const size_t number_bins,const size_t number_pixels,size_t *histogram) { double scale, sum; register ssize_t i; /* Rescale histogram to range [min-intensity .. max-intensity]. 
*/ scale=(double) (range_info->max-range_info->min)/number_pixels; sum=0.0; for (i=0; i < (ssize_t) number_bins; i++) { sum+=histogram[i]; histogram[i]=(size_t) (range_info->min+scale*sum); if (histogram[i] > range_info->max) histogram[i]=range_info->max; } } static MagickBooleanType CLAHE(const RectangleInfo *clahe_info, const RectangleInfo *tile_info,const RangeInfo *range_info, const size_t number_bins,const double clip_limit,unsigned short *pixels) { MemoryInfo *tile_cache; register unsigned short *p; size_t limit, *tiles; ssize_t y; unsigned short *lut; /* Constrast limited adapted histogram equalization. */ if (clip_limit == 1.0) return(MagickTrue); tile_cache=AcquireVirtualMemory((size_t) clahe_info->x*clahe_info->y, number_bins*sizeof(*tiles)); if (tile_cache == (MemoryInfo *) NULL) return(MagickFalse); lut=(unsigned short *) AcquireQuantumMemory(NumberCLAHEGrays,sizeof(*lut)); if (lut == (unsigned short *) NULL) { tile_cache=RelinquishVirtualMemory(tile_cache); return(MagickFalse); } tiles=(size_t *) GetVirtualMemoryBlob(tile_cache); limit=(size_t) (clip_limit*(tile_info->width*tile_info->height)/number_bins); if (limit < 1UL) limit=1UL; /* Generate greylevel mappings for each tile. */ GenerateCLAHELut(range_info,number_bins,lut); p=pixels; for (y=0; y < (ssize_t) clahe_info->y; y++) { register ssize_t x; for (x=0; x < (ssize_t) clahe_info->x; x++) { size_t *histogram; histogram=tiles+(number_bins*(y*clahe_info->x+x)); GenerateCLAHEHistogram(clahe_info,tile_info,number_bins,lut,p,histogram); ClipCLAHEHistogram((double) limit,number_bins,histogram); MapCLAHEHistogram(range_info,number_bins,tile_info->width* tile_info->height,histogram); p+=tile_info->width; } p+=clahe_info->width*(tile_info->height-1); } /* Interpolate greylevel mappings to get CLAHE image. 
*/ p=pixels; for (y=0; y <= (ssize_t) clahe_info->y; y++) { OffsetInfo offset; RectangleInfo tile; register ssize_t x; tile.height=tile_info->height; tile.y=y-1; offset.y=tile.y+1; if (y == 0) { /* Top row. */ tile.height=tile_info->height >> 1; tile.y=0; offset.y=0; } else if (y == (ssize_t) clahe_info->y) { /* Bottom row. */ tile.height=(tile_info->height+1) >> 1; tile.y=clahe_info->y-1; offset.y=tile.y; } for (x=0; x <= (ssize_t) clahe_info->x; x++) { tile.width=tile_info->width; tile.x=x-1; offset.x=tile.x+1; if (x == 0) { /* Left column. */ tile.width=tile_info->width >> 1; tile.x=0; offset.x=0; } else if (x == (ssize_t) clahe_info->x) { /* Right column. */ tile.width=(tile_info->width+1) >> 1; tile.x=clahe_info->x-1; offset.x=tile.x; } InterpolateCLAHE(clahe_info, tiles+(number_bins*(tile.y*clahe_info->x+tile.x)), /* Q12 */ tiles+(number_bins*(tile.y*clahe_info->x+offset.x)), /* Q22 */ tiles+(number_bins*(offset.y*clahe_info->x+tile.x)), /* Q11 */ tiles+(number_bins*(offset.y*clahe_info->x+offset.x)), /* Q21 */ &tile,lut,p); p+=tile.width; } p+=clahe_info->width*(tile.height-1); } lut=(unsigned short *) RelinquishMagickMemory(lut); tile_cache=RelinquishVirtualMemory(tile_cache); return(MagickTrue); } MagickExport MagickBooleanType CLAHEImage(Image *image,const size_t width, const size_t height,const size_t number_bins,const double clip_limit, ExceptionInfo *exception) { #define CLAHEImageTag "CLAHE/Image" CacheView *image_view; ColorspaceType colorspace; MagickBooleanType status; MagickOffsetType progress; MemoryInfo *pixel_cache; RangeInfo range_info; RectangleInfo clahe_info, tile_info; size_t n; ssize_t y; unsigned short *pixels; /* Configure CLAHE parameters. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); range_info.min=0; range_info.max=NumberCLAHEGrays-1; tile_info.width=width; if (tile_info.width == 0) tile_info.width=image->columns >> 3; tile_info.height=height; if (tile_info.height == 0) tile_info.height=image->rows >> 3; tile_info.x=0; if ((image->columns % tile_info.width) != 0) tile_info.x=(ssize_t) tile_info.width-(image->columns % tile_info.width); tile_info.y=0; if ((image->rows % tile_info.height) != 0) tile_info.y=(ssize_t) tile_info.height-(image->rows % tile_info.height); clahe_info.width=image->columns+tile_info.x; clahe_info.height=image->rows+tile_info.y; clahe_info.x=(ssize_t) clahe_info.width/tile_info.width; clahe_info.y=(ssize_t) clahe_info.height/tile_info.height; pixel_cache=AcquireVirtualMemory(clahe_info.width,clahe_info.height* sizeof(*pixels)); if (pixel_cache == (MemoryInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); pixels=(unsigned short *) GetVirtualMemoryBlob(pixel_cache); colorspace=image->colorspace; if (TransformImageColorspace(image,LabColorspace,exception) == MagickFalse) { pixel_cache=RelinquishVirtualMemory(pixel_cache); return(MagickFalse); } /* Initialize CLAHE pixels. 
*/ image_view=AcquireVirtualCacheView(image,exception); progress=0; status=MagickTrue; n=0; for (y=0; y < (ssize_t) clahe_info.height; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-(tile_info.x >> 1),y- (tile_info.y >> 1),clahe_info.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) clahe_info.width; x++) { pixels[n++]=ScaleQuantumToShort(p[0]); p+=GetPixelChannels(image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed=SetImageProgress(image,CLAHEImageTag,progress,2* GetPixelChannels(image)); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); status=CLAHE(&clahe_info,&tile_info,&range_info,number_bins == 0 ? (size_t) 128 : MagickMin(number_bins,256),clip_limit,pixels); if (status == MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); /* Push CLAHE pixels to CLAHE image. 
*/ image_view=AcquireAuthenticCacheView(image,exception); n=clahe_info.width*(tile_info.y >> 1); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } n+=tile_info.x >> 1; for (x=0; x < (ssize_t) image->columns; x++) { q[0]=ScaleShortToQuantum(pixels[n++]); q+=GetPixelChannels(image); } n+=(clahe_info.width-image->columns-(tile_info.x >> 1)); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed=SetImageProgress(image,CLAHEImageTag,progress,2* GetPixelChannels(image)); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); pixel_cache=RelinquishVirtualMemory(pixel_cache); if (TransformImageColorspace(image,colorspace,exception) == MagickFalse) status=MagickFalse; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClutImage() replaces each color value in the given image, by using it as an % index to lookup a replacement color value in a Color Look UP Table in the % form of an image. The values are extracted along a diagonal of the CLUT % image so either a horizontal or vertial gradient image can be used. % % Typically this is used to either re-color a gray-scale image according to a % color gradient in the CLUT image, or to perform a freeform histogram % (level) adjustment according to the (typically gray-scale) gradient in the % CLUT image. 
% % When the 'channel' mask includes the matte/alpha transparency channel but % one image has no such channel it is assumed that that image is a simple % gray-scale image that will effect the alpha channel values, either for % gray-scale coloring (with transparent or semi-transparent colors), or % a histogram adjustment of existing alpha channel values. If both images % have matte channels, direct and normal indexing is applied, which is rarely % used. % % The format of the ClutImage method is: % % MagickBooleanType ClutImage(Image *image,Image *clut_image, % const PixelInterpolateMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o clut_image: the color lookup table image for replacement color values. % % o method: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image, const PixelInterpolateMethod method,ExceptionInfo *exception) { #define ClutImageTag "Clut/Image" CacheView *clut_view, *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo *clut_map; register ssize_t i; ssize_t adjust, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clut_image != (Image *) NULL); assert(clut_image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && (IsGrayColorspace(clut_image->colorspace) == MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace,exception); clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map)); if (clut_map == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", 
image->filename); /* Clut image. */ status=MagickTrue; progress=0; adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1); clut_view=AcquireVirtualCacheView(clut_image,exception); for (i=0; i <= (ssize_t) MaxMap; i++) { GetPixelInfo(clut_image,clut_map+i); status=InterpolatePixelInfo(clut_image,clut_view,method, (double) i*(clut_image->columns-adjust)/MaxMap,(double) i* (clut_image->rows-adjust)/MaxMap,clut_map+i,exception); if (status == MagickFalse) break; } clut_view=DestroyCacheView(clut_view); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { PixelTrait traits; GetPixelInfoPixel(image,q,&pixel); traits=GetPixelChannelTraits(image,RedPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.red))].red; traits=GetPixelChannelTraits(image,GreenPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.green))].green; traits=GetPixelChannelTraits(image,BluePixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.blue))].blue; traits=GetPixelChannelTraits(image,BlackPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.black))].black; traits=GetPixelChannelTraits(image,AlphaPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum( 
pixel.alpha))].alpha; SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ClutImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map); if ((clut_image->alpha_trait != UndefinedPixelTrait) && ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)) (void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r D e c i s i o n L i s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorDecisionListImage() accepts a lightweight Color Correction Collection % (CCC) file which solely contains one or more color corrections and applies % the correction to the image. Here is a sample CCC file: % % <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2"> % <ColorCorrection id="cc03345"> % <SOPNode> % <Slope> 0.9 1.2 0.5 </Slope> % <Offset> 0.4 -0.5 0.6 </Offset> % <Power> 1.0 0.8 1.5 </Power> % </SOPNode> % <SATNode> % <Saturation> 0.85 </Saturation> % </SATNode> % </ColorCorrection> % </ColorCorrectionCollection> % % which includes the slop, offset, and power for each of the RGB channels % as well as the saturation. % % The format of the ColorDecisionListImage method is: % % MagickBooleanType ColorDecisionListImage(Image *image, % const char *color_correction_collection,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o color_correction_collection: the color correction collection in XML. 
% % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ColorDecisionListImage(Image *image, const char *color_correction_collection,ExceptionInfo *exception) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView *image_view; char token[MagickPathExtent]; ColorCorrection color_correction; const char *content, *p; MagickBooleanType status; MagickOffsetType progress; PixelInfo *cdl_map; register ssize_t i; ssize_t y; XMLTreeInfo *cc, *ccc, *sat, *sop; /* Allocate and initialize cdl maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (color_correction_collection == (const char *) NULL) return(MagickFalse); ccc=NewXMLTree((const char *) color_correction_collection,exception); if (ccc == (XMLTreeInfo *) NULL) return(MagickFalse); cc=GetXMLTreeChild(ccc,"ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc=DestroyXMLTree(ccc); return(MagickFalse); } color_correction.red.slope=1.0; color_correction.red.offset=0.0; color_correction.red.power=1.0; color_correction.green.slope=1.0; color_correction.green.offset=0.0; color_correction.green.power=1.0; color_correction.blue.slope=1.0; color_correction.blue.offset=0.0; color_correction.blue.power=1.0; color_correction.saturation=0.0; sop=GetXMLTreeChild(cc,"SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo *offset, *power, *slope; slope=GetXMLTreeChild(sop,"Slope"); if (slope != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(slope); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); switch (i) { 
case 0: { color_correction.red.slope=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.slope=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.slope=StringToDouble(token, (char **) NULL); break; } } } } offset=GetXMLTreeChild(sop,"Offset"); if (offset != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(offset); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.offset=StringToDouble(token, (char **) NULL); break; } case 1: { color_correction.green.offset=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.offset=StringToDouble(token, (char **) NULL); break; } } } } power=GetXMLTreeChild(sop,"Power"); if (power != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(power); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.power=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.power=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.power=StringToDouble(token, (char **) NULL); break; } } } } } sat=GetXMLTreeChild(cc,"SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo *saturation; saturation=GetXMLTreeChild(sat,"Saturation"); if (saturation != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(saturation); p=(const char *) content; (void) GetNextToken(p,&p,MagickPathExtent,token); color_correction.saturation=StringToDouble(token,(char **) NULL); } } ccc=DestroyXMLTree(ccc); if (image->debug != MagickFalse) { (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Color Correction Collection:"); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " 
color_correction.red.slope: %g",color_correction.red.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.offset: %g",color_correction.red.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.power: %g",color_correction.red.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.slope: %g",color_correction.green.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.offset: %g",color_correction.green.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.power: %g",color_correction.green.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.slope: %g",color_correction.blue.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.offset: %g",color_correction.blue.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.power: %g",color_correction.blue.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.saturation: %g",color_correction.saturation); } cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map)); if (cdl_map == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); for (i=0; i <= (ssize_t) MaxMap; i++) { cdl_map[i].red=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+ color_correction.red.offset,color_correction.red.power)))); cdl_map[i].green=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+ color_correction.green.offset,color_correction.green.power)))); cdl_map[i].blue=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+ color_correction.blue.offset,color_correction.blue.power)))); } if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Apply transfer 
function to colormap. */ double luma; luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+ 0.07217f*image->colormap[i].blue; image->colormap[i].red=luma+color_correction.saturation*cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma; image->colormap[i].green=luma+color_correction.saturation*cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-luma; image->colormap[i].blue=luma+color_correction.saturation*cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma; } /* Apply transfer function to image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double luma; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+ 0.07217f*GetPixelBlue(image,q); SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q); SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q); SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; 
proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag, progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastImage() enhances the intensity differences between the lighter and % darker elements of the image. Set sharpen to a MagickTrue to increase the % image contrast otherwise the contrast is reduced. % % The format of the ContrastImage method is: % % MagickBooleanType ContrastImage(Image *image, % const MagickBooleanType sharpen,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o sharpen: Increase or decrease image contrast. % % o exception: return any errors or warnings in this structure. % */ static void Contrast(const int sign,double *red,double *green,double *blue) { double brightness, hue, saturation; /* Enhance contrast: dark color become darker, light color become lighter. 
*/ assert(red != (double *) NULL); assert(green != (double *) NULL); assert(blue != (double *) NULL); hue=0.0; saturation=0.0; brightness=0.0; ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)- brightness); if (brightness > 1.0) brightness=1.0; else if (brightness < 0.0) brightness=0.0; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } MagickExport MagickBooleanType ContrastImage(Image *image, const MagickBooleanType sharpen,ExceptionInfo *exception) { #define ContrastImageTag "Contrast/Image" CacheView *image_view; int sign; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse) return(MagickTrue); #endif if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); sign=sharpen != MagickFalse ? 1 : -1; if (image->storage_class == PseudoClass) { /* Contrast enhance colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { double blue, green, red; red=(double) image->colormap[i].red; green=(double) image->colormap[i].green; blue=(double) image->colormap[i].blue; Contrast(sign,&red,&green,&blue); image->colormap[i].red=(MagickRealType) red; image->colormap[i].green=(MagickRealType) green; image->colormap[i].blue=(MagickRealType) blue; } } /* Contrast enhance image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double blue, green, red; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); Contrast(sign,&red,&green,&blue); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastStretchImage() is a simple image enhancement technique that attempts % to improve the contrast in an image by 'stretching' the range of intensity % values it contains to span a desired range of values. It differs from the % more sophisticated histogram equalization in that it can only apply a % linear scaling function to the image pixel values. As a result the % 'enhancement' is less harsh. 
%
%  The format of the ContrastStretchImage method is:
%
%      MagickBooleanType ContrastStretchImage(Image *image,
%        const double black_point,const double white_point,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: the black point, expressed as a pixel count in the
%      range 0 to number-of-pixels.
%
%    o white_point: the white point, expressed as a pixel count in the
%      range 0 to number-of-pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define MaxRange(color)  ((double) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag  "ContrastStretch/Image"

  CacheView
    *image_view;

  double
    *black,
    *histogram,
    *stretch_map,
    *white;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageGray(image,exception) != MagickFalse)
    (void) SetImageColorspace(image,GRAYColorspace,exception);
  black=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*black));
  white=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*white));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*stretch_map));
  if ((black == (double *) NULL) || (white == (double *) NULL) ||
      (histogram == (double *) NULL) || (stretch_map == (double *) NULL))
    {
      /*
        Release whatever was successfully acquired before throwing.
      */
      if (stretch_map != (double *) NULL)
        stretch_map=(double *) RelinquishMagickMemory(stretch_map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (white != (double *) NULL)
        white=(double *)
          RelinquishMagickMemory(white);
      if (black != (double *) NULL)
        black=(double *) RelinquishMagickMemory(black);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      /*
        With the default channel mask all channels share the intensity
        histogram; otherwise each channel is histogrammed independently.
      */
      pixel=GetPixelIntensity(image,p);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        if (image->channel_mask != DefaultChannels)
          pixel=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(pixel))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black/white levels.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    register ssize_t
      j;

    black[i]=0.0;
    white[i]=MaxRange(QuantumRange);
    /*
      Walk up from 0 until black_point pixels are accumulated, then down
      from MaxMap until (total - white_point) pixels are accumulated.
    */
    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > black_point)
        break;
    }
    black[i]=(double) j;
    intensity=0.0;
    for (j=(ssize_t) MaxMap; j != 0; j--)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > ((double) image->columns*image->rows-white_point))
        break;
    }
    white[i]=(double) j;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the histogram to create the stretched image mapping.
  */
  (void) memset(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*stretch_map));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    register ssize_t
      j;

    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        gamma;

      /*
        Linear ramp between black[i] and white[i]; clamp outside.
      */
      gamma=PerceptibleReciprocal(white[i]-black[i]);
      if (j < (ssize_t) black[i])
        stretch_map[GetPixelChannels(image)*j+i]=0.0;
      else
        if (j > (ssize_t) white[i])
          stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange;
        else
          if (black[i] != white[i])
            stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum(
              (double) (MaxMap*gamma*(j-black[i])));
    }
  }
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        j;

      /*
        Stretch-contrast colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,RedPixelChannel);
            image->colormap[j].red=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,GreenPixelChannel);
            image->colormap[j].green=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,BluePixelChannel);
            image->colormap[j].blue=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,AlphaPixelChannel);
            image->colormap[j].alpha=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i];
          }
      }
    }
  /*
    Stretch-contrast image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (black[j] == white[j])
          continue;
        q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ContrastStretchImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  stretch_map=(double *) RelinquishMagickMemory(stretch_map);
  white=(double *) RelinquishMagickMemory(white);
  black=(double *) RelinquishMagickMemory(black);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                           %
%                                                                           %
%                                                                           %
%     E n h a n c e I m a g e                                               %
%                                                                           %
%                                                                           %
%                                                                           %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EnhanceImage() applies a digital filter that improves the quality of a
%  noisy image.
%
%  The format of the EnhanceImage method is:
%
%      Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
#define EnhanceImageTag  "Enhance/Image"
/*
  Accumulate neighbor r into `aggregate` with the given weight, but only if
  its color distance (a mean-weighted squared distance over all channels) to
  the center pixel is below the 0.069 threshold; always advances r to the
  next pixel.  Relies on locals mean, distance, distance_squared, aggregate,
  total_weight, pixel and r being in scope at the expansion site.
*/
#define EnhancePixel(weight) \
  mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \
  distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \
  distance_squared=(4.0+mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \
  distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \
  distance_squared+=(7.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \
  distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \
  distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \
  distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \
  distance_squared+=(5.0-mean)*distance*distance; \
  if (distance_squared < 0.069) \
    { \
      aggregate.red+=(weight)*GetPixelRed(image,r); \
      aggregate.green+=(weight)*GetPixelGreen(image,r); \
      aggregate.blue+=(weight)*GetPixelBlue(image,r); \
      aggregate.black+=(weight)*GetPixelBlack(image,r); \
      aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \
      total_weight+=(weight); \
    } \
  r+=GetPixelChannels(image);

  CacheView
    *enhance_view,
    *image_view;

  Image
    *enhance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  enhance_image=CloneImage(image,0,0,MagickTrue,
    exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse)
    {
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,enhance_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    /*
      Read a 5-row band with a 2-pixel border on each side; `center` is the
      offset of the pixel being enhanced within that band.
    */
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2);
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        distance,
        distance_squared,
        mean,
        total_weight;

      PixelInfo
        aggregate;

      register const Quantum
        *magick_restrict r;

      GetPixelInfo(image,&aggregate);
      total_weight=0.0;
      GetPixelInfoPixel(image,p+center,&pixel);
      /*
        5x5 weighted window around the center pixel, one row at a time.
      */
      r=p;
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      r=p+GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+2*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0);
        EnhancePixel(40.0); EnhancePixel(10.0);
      r=p+3*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+4*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      if (total_weight > MagickEpsilon)
        {
          /*
            Rounded weighted average; if no neighbor qualified the center
            pixel is written back unchanged.
          */
          pixel.red=((aggregate.red+total_weight/2.0)/total_weight);
          pixel.green=((aggregate.green+total_weight/2.0)/total_weight);
          pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight);
          pixel.black=((aggregate.black+total_weight/2.0)/total_weight);
          pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight);
        }
      SetPixelViaPixelInfo(enhance_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(enhance_image);
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    enhance_image=DestroyImage(enhance_image);
  return(enhance_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                           %
%                                                                           %
%                                                                           %
%     E q u a l i z e I m a g e                                             %
%                                                                           %
%                                                                           %
%                                                                           %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EqualizeImage() applies a histogram equalization to the image.
%
%  The format of the EqualizeImage method is:
%
%      MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType EqualizeImage(Image *image, ExceptionInfo *exception) { #define EqualizeImageTag "Equalize/Image" CacheView *image_view; double black[CompositePixelChannel+1], *equalize_map, *histogram, *map, white[CompositePixelChannel+1]; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* Allocate and initialize histogram arrays. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateEqualizeImage(image,exception) != MagickFalse) return(MagickTrue); #endif if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels* sizeof(*equalize_map)); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels* sizeof(*histogram)); map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*sizeof(*map)); if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) || (map == (double *) NULL)) { if (map != (double *) NULL) map=(double *) RelinquishMagickMemory(map); if (histogram != (double *) NULL) histogram=(double *) RelinquishMagickMemory(histogram); if (equalize_map != (double *) NULL) equalize_map=(double *) RelinquishMagickMemory(equalize_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. 
*/ status=MagickTrue; (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; intensity=(double) p[i]; if ((image->channel_mask & SyncChannels) != 0) intensity=GetPixelIntensity(image,p); histogram[GetPixelChannels(image)*ScaleQuantumToMap( ClampToQuantum(intensity))+i]++; } p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Integrate the histogram to get the equalization map. */ for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; register ssize_t j; intensity=0.0; for (j=0; j <= (ssize_t) MaxMap; j++) { intensity+=histogram[GetPixelChannels(image)*j+i]; map[GetPixelChannels(image)*j+i]=intensity; } } (void) memset(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*equalize_map)); (void) memset(black,0,sizeof(*black)); (void) memset(white,0,sizeof(*white)); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { register ssize_t j; black[i]=map[i]; white[i]=map[GetPixelChannels(image)*MaxMap+i]; if (black[i] != white[i]) for (j=0; j <= (ssize_t) MaxMap; j++) equalize_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum((double) ((MaxMap*(map[ GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i]))); } histogram=(double *) RelinquishMagickMemory(histogram); map=(double *) RelinquishMagickMemory(map); if (image->storage_class == PseudoClass) { register ssize_t j; /* Equalize colormap. 
*/ for (j=0; j < (ssize_t) image->colors; j++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, RedPixelChannel); if (black[channel] != white[channel]) image->colormap[j].red=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+ channel]; } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, GreenPixelChannel); if (black[channel] != white[channel]) image->colormap[j].green=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+ channel]; } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, BluePixelChannel); if (black[channel] != white[channel]) image->colormap[j].blue=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+ channel]; } if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, AlphaPixelChannel); if (black[channel] != white[channel]) image->colormap[j].alpha=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+ channel]; } } } /* Equalize image. 
*/ progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j])) continue; q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(q[j])+j]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); equalize_map=(double *) RelinquishMagickMemory(equalize_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GammaImage() gamma-corrects a particular image channel. The same % image viewed on different devices will have perceptual differences in the % way the image's intensities are represented on the screen. Specify % individual gamma levels for the red, green, and blue channels, or adjust % all three with the gamma parameter. 
%  Values typically range from 0.8 to 2.3.
%
%  You can also reduce the influence of a particular channel with a gamma
%  value of 0.
%
%  The format of the GammaImage method is:
%
%      MagickBooleanType GammaImage(Image *image,const double gamma,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o level: the image gamma as a string (e.g. 1.6,1.2,1.0).
%
%    o gamma: the image gamma.
%
*/

/*
  pow() that passes negative values through unchanged (pow of a negative base
  with a fractional exponent is undefined).
*/
static inline double gamma_pow(const double value,const double gamma)
{
  return(value < 0.0 ? value : pow(value,gamma));
}

MagickExport MagickBooleanType GammaImage(Image *image,const double gamma,
  ExceptionInfo *exception)
{
#define GammaImageTag  "Gamma/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (gamma == 1.0)
    return(MagickTrue);  /* identity gamma: nothing to do */
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  /* gamma == 0.0 leaves the map all-zero, zeroing the selected channels */
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/
        MaxMap,PerceptibleReciprocal(gamma))));
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Gamma-correct colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].red))];
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].green))];
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].blue))];
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].alpha))];
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=gamma_map[ScaleQuantumToMap(ClampToQuantum((MagickRealType)
          q[j]))];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GammaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  if (image->gamma != 0.0)
    image->gamma*=gamma;  /* track the cumulative gamma in image metadata */
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G r a y s c a l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GrayscaleImage() converts the image to grayscale.
%
%  The format of the GrayscaleImage method is:
%
%      MagickBooleanType GrayscaleImage(Image *image,
%        const PixelIntensityMethod method ,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: the pixel intensity method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method,ExceptionInfo *exception)
{
#define GrayscaleImageTag  "Grayscale/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /* expand the colormap so every pixel can be rewritten in place */
      if (SyncImage(image,exception) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
        return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse)
    {
      image->intensity=method;
      image->type=GrayscaleType;
      if ((method == Rec601LuminancePixelIntensityMethod) ||
          (method == Rec709LuminancePixelIntensityMethod))
        return(SetImageColorspace(image,LinearGRAYColorspace,exception));
      return(SetImageColorspace(image,GRAYColorspace,exception));
    }
#endif
  /*
    Grayscale image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        red,
        intensity;

      red=(MagickRealType) GetPixelRed(image,q);
      green=(MagickRealType) GetPixelGreen(image,q);
      blue=(MagickRealType) GetPixelBlue(image,q);
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/3.0);
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /* luma: weights apply to gamma-encoded samples */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /* luminance: weights apply to linear samples */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:
        {
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(image,ClampToQuantum(intensity),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->intensity=method;
  image->type=GrayscaleType;
  if ((method == Rec601LuminancePixelIntensityMethod) ||
      (method == Rec709LuminancePixelIntensityMethod))
    return(SetImageColorspace(image,LinearGRAYColorspace,exception));
  return(SetImageColorspace(image,GRAYColorspace,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     H a l d C l u t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  HaldClutImage() applies a Hald color lookup table to the image.  A Hald
%  color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
%  Create it with the HALD coder.  You can apply any color transformation to
%  the Hald image and then use this method to apply the transform to the
%  image.
%
%  The format of the HaldClutImage method is:
%
%      MagickBooleanType HaldClutImage(Image *image,Image *hald_image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image, which is replaced by indexed CLUT values
%
%    o hald_image: the color lookup table image for replacement color values.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType HaldClutImage(Image *image,
  const Image *hald_image,ExceptionInfo *exception)
{
#define HaldClutImageTag  "Clut/Image"

  typedef struct _HaldInfo
  {
    double
      x,
      y,
      z;
  } HaldInfo;

  CacheView
    *hald_view,
    *image_view;

  double
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  size_t
    cube_size,
    length,
    level;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(hald_image != (Image *) NULL);
  assert(hald_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Hald clut image.
  */
  status=MagickTrue;
  progress=0;
  length=(size_t) MagickMin((MagickRealType) hald_image->columns,
    (MagickRealType) hald_image->rows);
  /* derive the color-cube edge length from the Hald image geometry */
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetPixelInfo(hald_image,&zero);
  hald_view=AcquireVirtualCacheView(hald_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        area,
        offset;

      HaldInfo
        point;

      PixelInfo
        pixel,
        pixel1,
        pixel2,
        pixel3,
        pixel4;

      /*
        Locate the pixel's position inside the CLUT cube; the fractional
        parts drive trilinear blending between the 8 surrounding entries.
      */
      point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q);
      offset=point.x+level*floor(point.y)+cube_size*floor(point.z);
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      pixel1=zero;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      pixel2=zero;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      pixel3=zero;
      area=point.y;
      if (hald_image->interpolate == NearestInterpolatePixel)
        area=(point.y < 0.5) ? 0.0 : 1.0;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        area,&pixel3);
      /* repeat on the next z-slice of the cube, then blend the two slices */
      offset+=cube_size;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      pixel4=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        area,&pixel4);
      pixel=zero;
      area=point.z;
      if (hald_image->interpolate == NearestInterpolatePixel)
        area=(point.z < 0.5)? 0.0 : 1.0;
      CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha,
        area,&pixel);
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,ClampToQuantum(pixel.red),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,ClampToQuantum(pixel.green),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,ClampToQuantum(pixel.black),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImage() adjusts the levels of a particular image channel by
%  scaling the colors falling between specified white and black points to
%  the full available quantum range.
%
%  The parameters provided represent the black, and white points.  The black
%  point specifies the darkest color in the image.  Colors darker than the
%  black point are set to zero.  White point specifies the lightest color in
%  the image.  Colors brighter than the white point are set to the maximum
%  quantum value.
%
%  If a '!'
%  flag is given, map black and white colors to the given levels
%  rather than mapping those levels to black and white.  See
%  LevelizeImage() below.
%
%  Gamma specifies a gamma correction to apply to the image.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType LevelImage(Image *image,const double black_point,
%        const double white_point,const double gamma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Linearly remap 'pixel' from [black_point,white_point] onto the full quantum
  range, with a gamma adjustment; gamma_pow() passes negative inputs through
  unchanged.
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const double pixel)
{
  double
    level_pixel,
    scale;

  scale=PerceptibleReciprocal(white_point-black_point);
  level_pixel=QuantumRange*gamma_pow(scale*((double) pixel-black_point),
    PerceptibleReciprocal(gamma));
  return(level_pixel);
}

MagickExport MagickBooleanType LevelImage(Image *image,const double black_point,
  const double white_point,const double gamma,ExceptionInfo *exception)
{
#define LevelImageTag  "Level/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].red));
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].green));
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].blue));
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].alpha));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (double) q[j]));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  (void) ClampImage(image,exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelizeImage() applies the reversed LevelImage() operation to just
%  the specific channels specified.  It compresses the full range of color
%  values, so that they lie between the given black and white points.  Gamma is
%  applied before the values are mapped.
%
%  LevelizeImage() can be called with by using a +level command line
%  API option, or using a '!' on a -level or LevelImage() geometry string.
%
%  It can be used to de-contrast a greyscale image to the exact levels
%  specified.  Or by using specific levels for each channel of an image you
%  can convert a gray-scale image to any linear color gradient, according to
%  those levels.
%
%  The format of the LevelizeImage method is:
%
%      MagickBooleanType LevelizeImage(Image *image,const double black_point,
%        const double white_point,const double gamma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o gamma: adjust gamma by this factor before mapping values.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma,
  ExceptionInfo *exception)
{
#define LevelizeImageTag  "Levelize/Image"
/* inverse of LevelPixel(): compress [0,QuantumRange] into
   [black_point,white_point] after the gamma adjustment */
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) LevelizeValue(
          image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) LevelizeValue(
          image->colormap[i].alpha);
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=LevelizeValue(q[j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImageColors() maps the given color to "black" and "white" values,
%  linearly spreading out the colors, and level values on a channel by channel
%  bases, as per LevelImage().  The given colors allows you to specify
%  different level ranges for each of the color channels separately.
%
%  If the boolean 'invert' is set true the image values will be modified in
%  the reverse direction.  That is any existing "black" and "white" colors in
%  the image will become the color values given, with all other values
%  compressed appropriately.  This effectively maps a greyscale gradient into
%  the given color gradient.
%
%  The format of the LevelImageColors method is:
%
%    MagickBooleanType LevelImageColors(Image *image,
%      const PixelInfo *black_color,const PixelInfo *white_color,
%      const MagickBooleanType invert,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_color: The color to map black to/from
%
%    o white_color: The color to map white to/from
%
%    o invert: if true map the colors (levelize), rather than from (level)
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelImageColors(Image *image,
  const PixelInfo *black_color,const PixelInfo *white_color,
  const MagickBooleanType invert,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  MagickStatusType
    status;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* a gray image leveled against non-gray endpoints must first be promoted */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
       (IsGrayColorspace(white_color->colorspace) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  status=MagickTrue;
  if (invert == MagickFalse)
    {
      /* level each selected channel independently under a channel mask */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,RedChannel);
          status&=LevelImage(image,black_color->red,white_color->red,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,GreenChannel);
          status&=LevelImage(image,black_color->green,white_color->green,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,BlueChannel);
          status&=LevelImage(image,black_color->blue,white_color->blue,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          channel_mask=SetImageChannelMask(image,BlackChannel);
          status&=LevelImage(image,black_color->black,white_color->black,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        {
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=LevelImage(image,black_color->alpha,white_color->alpha,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
    }
  else
    {
      /* inverse mapping: levelize each selected channel */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,RedChannel);
          status&=LevelizeImage(image,black_color->red,white_color->red,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,GreenChannel);
          status&=LevelizeImage(image,black_color->green,white_color->green,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,BlueChannel);
          status&=LevelizeImage(image,black_color->blue,white_color->blue,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          channel_mask=SetImageChannelMask(image,BlackChannel);
          status&=LevelizeImage(image,black_color->black,white_color->black,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        {
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=LevelizeImage(image,black_color->alpha,white_color->alpha,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L i n e a r S t r e t c h I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LinearStretchImage() discards any pixels below the black point and above
%  the white point and levels the remaining pixels.
%
%  The format of the LinearStretchImage method is:
%
%      MagickBooleanType LinearStretchImage(Image *image,
%        const double black_point,const double white_point,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: the black point.
%
%    o white_point: the white point.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define LinearStretchImageTag  "LinearStretch/Image"

  CacheView
    *image_view;

  double
    *histogram,
    intensity;

  MagickBooleanType
    status;

  ssize_t
    black,
    white,
    y;

  /*
    Allocate histogram and linear map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram.
  */
  (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      intensity=GetPixelIntensity(image,p);
      histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black and white point levels.
  */
  intensity=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /* delegate the actual remapping to LevelImage() */
  status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black),
    (double) ScaleMapToQuantum((MagickRealType) white),1.0,exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o d u l a t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModulateImage() lets you control the brightness, saturation, and hue
%  of an image.  Modulate represents the brightness, saturation, and hue
%  as one parameter (e.g. 90,150,100).  If the image colorspace is HSL, the
%  modulation is lightness, saturation, and hue.  For HWB, use blackness,
%  whiteness, and hue.  And for HCL, use chrome, luma, and hue.
%
%  The format of the ModulateImage method is:
%
%      MagickBooleanType ModulateImage(Image *image,const char *modulate,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o modulate: Define the percent change in brightness, saturation, and hue.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertHCLToRGB(hue,chroma,luma,red,green,blue);
}

static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertHCLpToRGB(hue,chroma,luma,red,green,blue);
}

static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,double *red,
  double *green,double *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Increase or decrease color brightness, saturation, or hue.
  */
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  saturation*=0.01*percent_saturation;
  brightness*=0.01*percent_brightness;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}

static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,double *red,
  double *green,double *blue)
{
  double
    intensity,
    hue,
    saturation;

  /*
    Increase or decrease color intensity, saturation, or hue.
  */
  ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  saturation*=0.01*percent_saturation;
  intensity*=0.01*percent_intensity;
  ConvertHSIToRGB(hue,saturation,intensity,red,green,blue);
}

static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,double *red,
  double *green,double *blue)
{
  double
    hue,
    lightness,
    saturation;

  /*
    Increase or decrease color lightness, saturation, or hue.
  */
  ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  saturation*=0.01*percent_saturation;
  lightness*=0.01*percent_lightness;
  ConvertHSLToRGB(hue,saturation,lightness,red,green,blue);
}

static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,double *red,
  double *green,double *blue)
{
  double
    hue,
    saturation,
    value;

  /*
    Increase or decrease color value, saturation, or hue.
  */
  ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  saturation*=0.01*percent_saturation;
  value*=0.01*percent_value;
  ConvertHSVToRGB(hue,saturation,value,red,green,blue);
}

static inline void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,double *red,
  double *green,double *blue)
{
  double
    blackness,
    hue,
    whiteness;

  /*
    Increase or decrease color blackness, whiteness, or hue.
  */
  ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  blackness*=0.01*percent_blackness;
  whiteness*=0.01*percent_whiteness;
  ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue);
}

static inline void ModulateLCHab(const double percent_luma,
  const double percent_chroma,const double percent_hue,double *red,
  double *green,double *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue);
  luma*=0.01*percent_luma;
  chroma*=0.01*percent_chroma;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertLCHabToRGB(luma,chroma,hue,red,green,blue);
}

static inline void ModulateLCHuv(const double percent_luma,
  const double percent_chroma,const double percent_hue,double *red,
  double *green,double *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue);
  luma*=0.01*percent_luma;
  chroma*=0.01*percent_chroma;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue);
}

MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate,
  ExceptionInfo *exception)
{
#define ModulateImageTag  "Modulate/Image"

  CacheView
    *image_view;

  ColorspaceType
    colorspace;

  const char
    *artifact;

  double
    percent_brightness,
    percent_hue,
    percent_saturation;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize modulate table.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (modulate == (char *) NULL)
    return(MagickFalse);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /* parse "brightness[,saturation[,hue]]"; missing parts default to 100% */
  flags=ParseGeometry(modulate,&geometry_info);
  percent_brightness=geometry_info.rho;
  percent_saturation=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    percent_saturation=100.0;
  percent_hue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    percent_hue=100.0;
  colorspace=UndefinedColorspace;
  artifact=GetImageArtifact(image,"modulate:colorspace");
  if (artifact != (const char *) NULL)
    colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions,
      MagickFalse,artifact);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      double
        blue,
        green,
        red;

      /*
        Modulate image colormap.
      */
      red=(double) image->colormap[i].red;
      green=(double) image->colormap[i].green;
      blue=(double) image->colormap[i].blue;
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      image->colormap[i].red=red;
      image->colormap[i].green=green;
      image->colormap[i].blue=blue;
    }
  /*
    Modulate image.
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateModulateImage(image,percent_brightness,percent_hue, percent_saturation,colorspace,exception) != MagickFalse) return(MagickTrue); #endif status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red; red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHColorspace: case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); 
SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e g a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NegateImage() negates the colors in the reference image. The grayscale % option means that only grayscale values within the image are negated. % % The format of the NegateImage method is: % % MagickBooleanType NegateImage(Image *image, % const MagickBooleanType grayscale,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o grayscale: If MagickTrue, only negate grayscale pixels within the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType NegateImage(Image *image, const MagickBooleanType grayscale,ExceptionInfo *exception) { #define NegateImageTag "Negate/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Negate colormap. 
*/ if (grayscale != MagickFalse) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=QuantumRange-image->colormap[i].red; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=QuantumRange-image->colormap[i].green; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=QuantumRange-image->colormap[i].blue; } /* Negate image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); if( grayscale != MagickFalse ) { for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; if (IsPixelGray(image,q) == MagickFalse) { q+=GetPixelChannels(image); continue; } for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=QuantumRange-q[j]; } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed=SetImageProgress(image,NegateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(MagickTrue); } /* Negate image. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=QuantumRange-q[j]; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,NegateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N o r m a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The NormalizeImage() method enhances the contrast of a color image by % mapping the darkest 2 percent of all pixel to black and the brightest % 1 percent to white. % % The format of the NormalizeImage method is: % % MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType NormalizeImage(Image *image, ExceptionInfo *exception) { double black_point, white_point; black_point=(double) image->columns*image->rows*0.0015; white_point=(double) image->columns*image->rows*0.9995; return(ContrastStretchImage(image,black_point,white_point,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S i g m o i d a l C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SigmoidalContrastImage() adjusts the contrast of an image with a non-linear % sigmoidal contrast algorithm. Increase the contrast of the image using a % sigmoidal transfer function without saturating highlights or shadows. % Contrast indicates how much to increase the contrast (0 is none; 3 is % typical; 20 is pushing it); mid-point indicates where midtones fall in the % resultant image (0 is white; 50% is middle-gray; 100% is black). Set % sharpen to MagickTrue to increase the image contrast otherwise the contrast % is reduced. % % The format of the SigmoidalContrastImage method is: % % MagickBooleanType SigmoidalContrastImage(Image *image, % const MagickBooleanType sharpen,const char *levels, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o sharpen: Increase or decrease image contrast. % % o contrast: strength of the contrast, the larger the number the more % 'threshold-like' it becomes. % % o midpoint: midpoint of the function as a color value 0 to QuantumRange. % % o exception: return any errors or warnings in this structure. % */ /* ImageMagick 6 has a version of this function which uses LUTs. */ /* Sigmoidal function Sigmoidal with inflexion point moved to b and "slope constant" set to a. The first version, based on the hyperbolic tangent tanh, when combined with the scaling step, is an exact arithmetic clone of the sigmoid function based on the logistic curve. 
The equivalence is based on the identity 1/(1+exp(-t)) = (1+tanh(t/2))/2 (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the scaled sigmoidal derivation is invariant under affine transformations of the ordinate. The tanh version is almost certainly more accurate and cheaper. The 0.5 factor in the argument is to clone the legacy ImageMagick behavior. The reason for making the define depend on atanh even though it only uses tanh has to do with the construction of the inverse of the scaled sigmoidal. */ #if defined(MAGICKCORE_HAVE_ATANH) #define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) ) #else #define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) ) #endif /* Scaled sigmoidal function: ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) / ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) ) See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by zero. This is fixed below by exiting immediately when contrast is small, leaving the image (or colormap) unmodified. This appears to be safe because the series expansion of the logistic sigmoidal function around x=b is 1/2-a*(b-x)/4+... so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh). */ #define ScaledSigmoidal(a,b,x) ( \ (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \ (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) ) /* Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even when creating a LUT from in gamut values, hence the branching. In addition, HDRI may have out of gamut values. InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal: It is only a right inverse. This is unavoidable. 
*/ static inline double InverseScaledSigmoidal(const double a,const double b, const double x) { const double sig0=Sigmoidal(a,b,0.0); const double sig1=Sigmoidal(a,b,1.0); const double argument=(sig1-sig0)*x+sig0; const double clamped= ( #if defined(MAGICKCORE_HAVE_ATANH) argument < -1+MagickEpsilon ? -1+MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b+(2.0/a)*atanh(clamped)); #else argument < MagickEpsilon ? MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b-log(1.0/clamped-1.0)/a); #endif } MagickExport MagickBooleanType SigmoidalContrastImage(Image *image, const MagickBooleanType sharpen,const double contrast,const double midpoint, ExceptionInfo *exception) { #define SigmoidalContrastImageTag "SigmoidalContrast/Image" #define ScaledSig(x) ( ClampToQuantum(QuantumRange* \ ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) ) #define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \ InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) ) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Convenience macros. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Side effect: may clamp values unless contrast<MagickEpsilon, in which case nothing is done. */ if (contrast < MagickEpsilon) return(MagickTrue); /* Sigmoidal-contrast enhance colormap. 
*/ if (image->storage_class == PseudoClass) { register ssize_t i; if( sharpen != MagickFalse ) for (i=0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(MagickRealType) ScaledSig( image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(MagickRealType) ScaledSig( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(MagickRealType) ScaledSig( image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(MagickRealType) ScaledSig( image->colormap[i].alpha); } else for (i=0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(MagickRealType) InverseScaledSig( image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(MagickRealType) InverseScaledSig( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(MagickRealType) InverseScaledSig( image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(MagickRealType) InverseScaledSig( image->colormap[i].alpha); } } /* Sigmoidal-contrast enhance image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if( sharpen != MagickFalse ) q[i]=ScaledSig(q[i]); else q[i]=InverseScaledSig(q[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e B a l a n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteBalanceImage() applies white balancing to an image according to a % grayworld assumption in the LAB colorspace. 
% % The format of the WhiteBalanceImage method is: % % MagickBooleanType WhiteBalanceImage(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-level % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType WhiteBalanceImage(Image *image, ExceptionInfo *exception) { #define WhiteBalanceImageTag "WhiteBalance/Image" CacheView *image_view; const char *artifact; double a_mean, b_mean; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* White balance image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=TransformImageColorspace(image,LabColorspace,exception); a_mean=0.0; b_mean=0.0; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { a_mean+=QuantumScale*GetPixela(image,p)-0.5; b_mean+=QuantumScale*GetPixelb(image,p)-0.5; p+=GetPixelChannels(image); } } a_mean/=((double) image->columns*image->rows); b_mean/=((double) image->columns*image->rows); progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } 
for (x=0; x < (ssize_t) image->columns; x++) { double a, b; /* Scale the chroma distance shifted according to amount of luminance. */ a=(double) GetPixela(image,q)-1.1*GetPixelL(image,q)*a_mean; b=(double) GetPixelb(image,q)-1.1*GetPixelL(image,q)*b_mean; SetPixela(image,ClampToQuantum(a),q); SetPixelb(image,ClampToQuantum(b),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,WhiteBalanceImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); artifact=GetImageArtifact(image,"white-balance:vibrance"); if (artifact != (const char *) NULL) { ChannelType channel_mask; double black_point; GeometryInfo geometry_info; MagickStatusType flags; /* Level the a & b channels. */ flags=ParseGeometry(artifact,&geometry_info); black_point=geometry_info.rho; if ((flags & PercentValue) != 0) black_point*=(double) (QuantumRange/100.0); channel_mask=SetImageChannelMask(image,aChannel | bChannel); status&=LevelImage(image,black_point,(double) QuantumRange-black_point, 1.0,exception); (void) SetImageChannelMask(image,channel_mask); } status&=TransformImageColorspace(image,sRGBColorspace,exception); return(status); }
GB_unop__bnot_int8_int8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__bnot_int8_int8) // op(A') function: GB (_unop_tran__bnot_int8_int8) // C type: int8_t // A type: int8_t // cast: int8_t cij = aij // unaryop: cij = ~(aij) #define GB_ATYPE \ int8_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = ~(x) ; // casting #define GB_CAST(z, aij) \ int8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int8_t z = aij ; \ Cx [pC] = ~(z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BNOT || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__bnot_int8_int8) ( int8_t *Cx, // Cx and Ax may be aliased const int8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and 
uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; int8_t z = aij ; Cx [p] = ~(z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int8_t aij = Ax [p] ; int8_t z = aij ; Cx [p] = ~(z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__bnot_int8_int8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/fx-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Typedef declarations. 
*/ typedef enum { BitwiseAndAssignmentOperator = 0xd9U, BitwiseOrAssignmentOperator, LeftShiftAssignmentOperator, RightShiftAssignmentOperator, PowerAssignmentOperator, ModuloAssignmentOperator, PlusAssignmentOperator, SubtractAssignmentOperator, MultiplyAssignmentOperator, DivideAssignmentOperator, IncrementAssignmentOperator, DecrementAssignmentOperator, LeftShiftOperator, RightShiftOperator, LessThanEqualOperator, GreaterThanEqualOperator, EqualOperator, NotEqualOperator, LogicalAndOperator, LogicalOrOperator, ExponentialNotation } FxOperator; struct _FxInfo { const Image *images; char *expression; FILE *file; SplayTreeInfo *colors, *symbols; CacheView **view; RandomInfo *random_info; ExceptionInfo *exception; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireFxInfo() allocates the FxInfo structure. % % The format of the AcquireFxInfo method is: % % FxInfo *AcquireFxInfo(Image *images,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o expression: the expression. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
  ExceptionInfo *exception)
{
  /*
    Compound-operator substitution table: each multi-character lexeme is
    folded into the single-character operator code the parser expects.
    Order is significant -- longer lexemes (e.g. "<<=") must be rewritten
    before their prefixes (e.g. "<<") -- so entries are applied strictly
    top to bottom.
  */
  static const struct
  {
    int
      code;

    const char
      *lexeme;
  } fx_operators[] =
  {
    { BitwiseAndAssignmentOperator, "&=" },
    { BitwiseOrAssignmentOperator, "|=" },
    { LeftShiftAssignmentOperator, "<<=" },
    { RightShiftAssignmentOperator, ">>=" },
    { PowerAssignmentOperator, "^=" },
    { ModuloAssignmentOperator, "%=" },
    { PlusAssignmentOperator, "+=" },
    { SubtractAssignmentOperator, "-=" },
    { MultiplyAssignmentOperator, "*=" },
    { DivideAssignmentOperator, "/=" },
    { IncrementAssignmentOperator, "++" },
    { DecrementAssignmentOperator, "--" },
    { LeftShiftOperator, "<<" },
    { RightShiftOperator, ">>" },
    { LessThanEqualOperator, "<=" },
    { GreaterThanEqualOperator, ">=" },
    { EqualOperator, "==" },
    { NotEqualOperator, "!=" },
    { LogicalAndOperator, "&&" },
    { LogicalOrOperator, "||" },
    { ExponentialNotation, "**" }
  };

  char
    fx_op[2];

  const Image
    *next;

  FxInfo
    *fx_info;

  register ssize_t
    i;

  ssize_t
    j;

  /*
    Allocate and zero the evaluator state; acquire its helper objects.
  */
  fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
  (void) memset(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=images;
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /*
    One virtual cache view per image in the list, indexed by list position.
  */
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  for (next=GetFirstImageInList(fx_info->images); next != (Image *) NULL;
       next=next->next)
  {
    fx_info->view[i]=AcquireVirtualCacheView(next,exception);
    i++;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  (void) SubstituteString(&fx_info->expression," ","");  /* compact string */
  /*
    Convert compound to simple operators.
  */
  fx_op[1]='\0';
  for (j=0; j < (ssize_t) (sizeof(fx_operators)/sizeof(fx_operators[0])); j++)
  {
    *fx_op=(char) fx_operators[j].code;
    (void) SubstituteString(&fx_info->expression,fx_operators[j].lexeme,fx_op);
  }
  /*
    Force right-to-left associativity for unary negation.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y F x I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
%  The format of the DestroyFxInfo method is:
%
%      ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  register ssize_t
    i;

  /*
    Release every resource acquired by AcquireFxInfo(); the cache views are
    destroyed from the last list position down to the first.
  */
  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  i=(ssize_t) GetImageListLength(fx_info->images);
  while (--i >= 0)
    fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   F x E v a l u a t e C h a n n e l E x p r e s s i o n                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxEvaluateChannelExpression() evaluates an expression and returns the
%  results.
%
%  The format of the FxEvaluateExpression method is:
%
%      double FxEvaluateChannelExpression(FxInfo *fx_info,
%        const PixelChannel channel,const ssize_t x,const ssize_t y,
%        double *alpha,Exceptioninfo *exception)
%      double FxEvaluateExpression(FxInfo *fx_info,
%        double *alpha,Exceptioninfo *exception)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
%    o channel: the channel.
%
%    o x,y: the pixel position.
%
%    o alpha: the result.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Look up the cached value of a symbol in the expression's symbol table;
  returns NULL when the symbol has never been assigned.
*/
static inline const double *GetFxSymbolValue(FxInfo *magick_restrict fx_info,
  const char *symbol)
{
  return((const double *) GetValueFromSplayTree(fx_info->symbols,symbol));
}

/*
  Bind a symbol to a value: overwrite the existing entry when one is present,
  otherwise allocate a fresh slot and add it to the splay tree.  Returns
  MagickFalse (recording an exception) when the allocation fails.
*/
static inline MagickBooleanType SetFxSymbolValue(
  FxInfo *magick_restrict fx_info,const char *magick_restrict symbol,
  double const value)
{
  double
    *object;

  object=(double *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (object != (double *) NULL)
    {
      *object=value;
      return(MagickTrue);
    }
  object=(double *) AcquireQuantumMemory(1,sizeof(*object));
  if (object == (double *) NULL)
    {
      (void) ThrowMagickException(fx_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        fx_info->images->filename);
      return(MagickFalse);
    }
  *object=value;
  return(AddValueToSplayTree(fx_info->symbols,ConstantString(symbol),object));
}

/*
  Compute an image-wide statistic named by symbol ("depth", "kurtosis",
  "maxima", "mean", "minima", "skewness", "standard_deviation"), returned
  scaled by QuantumScale.  A ".<channel>" suffix on the symbol selects a
  channel by name and temporarily narrows the pixel-channel mask while the
  statistic is gathered (the original mask is restored before returning).
  Results are memoized in the symbol table under a key composed of the image
  address, the channel number, and the symbol, so each statistic is computed
  only once per image/channel.  Symbol matching is by prefix
  (LocaleNCompare), so a channel-suffixed name such as "mean.r" still
  selects the "mean" statistic.
*/
static double FxChannelStatistics(FxInfo *fx_info,Image *image,
  PixelChannel channel,const char *symbol,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  char
    key[MagickPathExtent];

  const double
    *value;

  double
    statistic;

  register const char
    *p;

  channel_mask=UndefinedChannel;
  /* scan for an optional ".<channel>" suffix */
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  if (*p == '.')
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1);
      if (option >= 0)
        {
          channel=(PixelChannel) option;
          channel_mask=SetPixelChannelMask(image,(ChannelType)
            (1UL << channel));
        }
    }
  (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image,
    (double) channel,symbol);
  /* return the memoized statistic when already computed for this image */
  value=GetFxSymbolValue(fx_info,key);
  if (value != (const double *) NULL)
    {
      if (channel_mask != UndefinedChannel)
        (void) SetPixelChannelMask(image,channel_mask);
      return(QuantumScale*(*value));
    }
  statistic=0.0;
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      size_t
        depth;

      depth=GetImageDepth(image,exception);
      statistic=(double) depth;
    }
  if (LocaleNCompare(symbol,"kurtosis",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      statistic=kurtosis;
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      statistic=maxima;
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      statistic=mean;
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      statistic=minima;
    }
  if (LocaleNCompare(symbol,"skewness",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      statistic=skewness;
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      statistic=standard_deviation;
    }
  /* restore the caller's channel mask before caching and returning */
  if (channel_mask != UndefinedChannel)
    (void) SetPixelChannelMask(image,channel_mask);
  if (SetFxSymbolValue(fx_info,key,statistic) == MagickFalse)
    return(0.0);
  return(QuantumScale*statistic);
}

/*
  Evaluate a subexpression at the given pixel; defined later in this file.
*/
static double FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t,
  const ssize_t,const char *,const size_t,double *,ExceptionInfo *);

/*
  Return MagickTrue when the expression begins with the named function: the
  first `length` characters match `name` and the character that follows is
  not whitespace (a '(' is accepted explicitly, though it already passes the
  whitespace test).  The initial scan rejects expressions shorter than
  length+1 characters.  NOTE(review): `c` reaches isspace() as a plain
  char-derived int, so high-bit characters could yield a negative argument;
  confirm inputs are ASCII-only.
*/
static inline MagickBooleanType IsFxFunction(const char *expression,
  const char *name,const size_t length)
{
  int
    c;

  register size_t
    i;

  for (i=0; i <= length; i++)
    if (expression[i] == '\0')
      return(MagickFalse);
  c=expression[length];
  if ((LocaleNCompare(expression,name,length) == 0) &&
      ((isspace(c) == 0) || (c == '(')))
    return(MagickTrue);
return(MagickFalse);
}

/*
  Greatest common divisor by Euclid's recursion; FxGCD(alpha,0) == alpha.
*/
static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta)
{
  if (beta != 0)
    return(FxGCD(beta,alpha % beta));
  return(alpha);
}

/*
  Advance over a parenthesized subexpression: returns a pointer to the ')'
  that closes the first '('-group, or to the terminating '\0', in which case
  an UnbalancedParenthesis exception is recorded.
*/
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  const char
    *subexpression;

  register ssize_t
    level;

  level=0;
  subexpression=expression;
  while ((*subexpression != '\0') &&
         ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL)))
  {
    if (strchr("(",(int) *subexpression) != (char *) NULL)
      level++;
    else
      if (strchr(")",(int) *subexpression) != (char *) NULL)
        level--;
    subexpression++;
  }
  if (*subexpression == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(subexpression);
}

/*
  Resolve one symbol of an fx expression in the context of pixel (x,y):
    o an optional image selector ('s', 'u', 'v', or 'u[expr]') picks the
      source image from the list;
    o an optional pixel selector ('p{x,y}' absolute or 'p[dx,dy]' relative)
      moves the sampling point;
    o the remainder is interpreted as a color name, a channel() function,
      a built-in attribute (w, h, page.*, resolution.*, ...), an image
      statistic, or a user variable from the symbol table (created with
      value 0.0 on first reference).
  The pixel itself is fetched through the per-image cache view using the
  image's interpolation method.
*/
static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel,
  const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
  ExceptionInfo *exception)
{
  char
    *q,
    symbol[MagickPathExtent];

  const char
    *p;

  const double
    *value;

  double
    alpha,
    beta;

  Image
    *image;

  MagickBooleanType
    status;

  PixelInfo
    pixel;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    level;

  p=expression;
  i=GetImageIndexInList(fx_info->images);
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  if (isalpha((int) ((unsigned char) *(p+1))) == 0)
    {
      char
        *subexpression;

      subexpression=AcquireString(expression);
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          /*
            Image selector: 's' is the current image, 'u' the first, 'v'
            the second; 'u[expr]' selects by evaluated index.
          */
          switch (*p)
          {
            case 's':
            default:
            {
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              /* copy the bracketed index expression, tracking nesting */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              i=(ssize_t) alpha;
              if (*p != '\0')
                p++;
            }
          if (*p == '.')
            p++;
        }
      if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
        {
          /*
            Pixel selector: p{x,y} sets the sample point absolutely,
            p[dx,dy] offsets it from the current pixel.
          */
          p++;
          if (*p == '{')
            {
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              point.x=alpha;
              point.y=beta;
              if (*p != '\0')
                p++;
            }
          else
            if (*p == '[')
              {
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  depth,&beta,exception);
                point.x+=alpha;
                point.y+=beta;
                if (*p != '\0')
                  p++;
              }
          if (*p == '.')
            p++;
        }
      subexpression=DestroyString(subexpression);
    }
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  i=GetImageIndexInList(image);
  GetPixelInfo(image,&pixel);
  status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  (void) status;
  if ((*p != '\0') && (*(p+1) != '\0') && (*(p+2) != '\0') &&
      (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) &&
      (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) &&
      (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      char
        name[MagickPathExtent];

      size_t
        length;

      /*
        Try to interpret the (possibly channel-suffixed) token as a color
        name; successful lookups are cached in fx_info->colors.
      */
      (void) CopyMagickString(name,p,MagickPathExtent);
      length=strlen(name);
      for (q=name+length-1; q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      q=name;
      if ((*q != '\0') && (*(q+1) != '\0') && (*(q+2) != '\0') &&
          (GetFxSymbolValue(fx_info,name) == (const double *) NULL))
        {
          PixelInfo
            *color;

          color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name);
          if (color != (PixelInfo *) NULL)
            {
              pixel=(*color);
              p+=length;
            }
          else
            {
              MagickBooleanType
                status;

              status=QueryColorCompliance(name,AllCompliance,&pixel,
                fx_info->exception);
              if (status != MagickFalse)
                {
                  (void) AddValueToSplayTree(fx_info->colors,
                    ConstantString(name),ClonePixelInfo(&pixel));
                  p+=length;
                }
            }
        }
    }
  (void) CopyMagickString(symbol,p,MagickPathExtent);
  StripString(symbol);
  if (*symbol == '\0')
    {
      /*
        Bare pixel reference (e.g. "p{1,2}"): return the value of the
        requested channel at the interpolated point.
      */
      switch (channel)
      {
        case RedPixelChannel: return(QuantumScale*pixel.red);
        case GreenPixelChannel: return(QuantumScale*pixel.green);
        case BluePixelChannel: return(QuantumScale*pixel.blue);
        case BlackPixelChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
        case AlphaPixelChannel:
        {
          if (pixel.alpha_trait == UndefinedPixelTrait)
            return(1.0);
          alpha=(double) (QuantumScale*pixel.alpha);
          return(alpha);
        }
        case CompositePixelChannel:
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
        case IndexPixelChannel:
          return(0.0);
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  /*
    Dispatch on the symbol's first letter; unmatched names fall through to
    the user-variable lookup at the bottom.
  */
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((QuantumScale*pixel.alpha));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (IsFxFunction(symbol,"channel",7) != MagickFalse)
        {
          GeometryInfo
            channel_info;

          MagickStatusType
            flags;

          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanPixelChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaPixelChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowPixelChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackPixelChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case AlphaPixelChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          /*
            NOTE(review): in this non-CMYK switch BlackPixelChannel reads
            chi and AlphaPixelChannel reads psi -- inverted relative to the
            CMYK switch above; confirm this pairing is intended.
          */
          switch (channel)
          {
            case RedPixelChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenPixelChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BluePixelChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case BlackPixelChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            case AlphaPixelChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            default:
              return(0.0);
          }
        }
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'E':
    case 'e':
    {
      if (LocaleCompare(symbol,"extent") == 0)
        {
          if (image->extent != 0)
            return((double) image->extent);
          return((double) GetBlobSize(image));
        }
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((double) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"intensity") == 0)
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
      if (LocaleCompare(symbol,"i") == 0)
        return((double) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((double) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luma") == 0)
        {
          double
            luma;

          luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luma);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminence;

          luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luminence);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return((double) GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.alpha);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return((double) image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return((double) image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return((double) image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return((double) image->page.y);
      if (LocaleCompare(symbol,"printsize.x") == 0)
        return(PerceptibleReciprocal(image->resolution.x)*image->columns);
      if (LocaleCompare(symbol,"printsize.y") == 0)
        return(PerceptibleReciprocal(image->resolution.y)*image->rows);
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(symbol,"quality") == 0)
        return((double) image->quality);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"skewness",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return((double) GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((double) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      /* NOTE(review): symbol "y" maps to the blue channel here -- confirm */
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        return((double) GetImageDepth(image,fx_info->exception));
      break;
    }
    default:
      break;
  }
  /*
    Not a built-in: treat as a user variable, creating it as 0.0 when it
    does not exist yet.
  */
  value=GetFxSymbolValue(fx_info,symbol);
  if (value != (const double *) NULL)
    return(*value);
  (void) SetFxSymbolValue(fx_info,symbol,0.0);
  return(0.0);
}

/*
  Scan an expression and return a pointer to the operator at which it should
  be split, honoring operator precedence (body continues below).
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  typedef enum
  {
UndefinedPrecedence, NullPrecedence, BitwiseComplementPrecedence, ExponentPrecedence, ExponentialNotationPrecedence, MultiplyPrecedence, AdditionPrecedence, ShiftPrecedence, RelationalPrecedence, EquivalencyPrecedence, BitwiseAndPrecedence, BitwiseOrPrecedence, LogicalAndPrecedence, LogicalOrPrecedence, TernaryPrecedence, AssignmentPrecedence, CommaPrecedence, SeparatorPrecedence } FxPrecedence; FxPrecedence precedence, target; register const char *subexpression; register int c; size_t level; c=(-1); level=0; subexpression=(const char *) NULL; target=NullPrecedence; while ((c != '\0') && (*expression != '\0')) { precedence=UndefinedPrecedence; if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@')) { expression++; continue; } switch (*expression) { case 'A': case 'a': { #if defined(MAGICKCORE_HAVE_ACOSH) if (IsFxFunction(expression,"acosh",5) != MagickFalse) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (IsFxFunction(expression,"asinh",5) != MagickFalse) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ATANH) if (IsFxFunction(expression,"atanh",5) != MagickFalse) { expression+=5; break; } #endif if (IsFxFunction(expression,"atan2",5) != MagickFalse) { expression+=5; break; } break; } case 'E': case 'e': { if ((isdigit(c) != 0) && ((LocaleNCompare(expression,"E+",2) == 0) || (LocaleNCompare(expression,"E-",2) == 0))) { expression+=2; /* scientific notation */ break; } } case 'J': case 'j': { if ((IsFxFunction(expression,"j0",2) != MagickFalse) || (IsFxFunction(expression,"j1",2) != MagickFalse)) { expression+=2; break; } break; } case '#': { while (isxdigit((int) ((unsigned char) *(expression+1))) != 0) expression++; break; } default: break; } if ((c == (int) '{') || (c == (int) '[')) level++; else if ((c == (int) '}') || (c == (int) ']')) level--; if (level == 0) switch ((unsigned char) *expression) { case '~': case '!': { precedence=BitwiseComplementPrecedence; break; } case '^': case '@': { 
precedence=ExponentPrecedence; break; } default: { if (((c != 0) && ((isdigit(c) != 0) || (strchr(")",c) != (char *) NULL))) && (((islower((int) ((unsigned char) *expression)) != 0) || (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) || ((isdigit(c) == 0) && (isdigit((int) ((unsigned char) *expression)) != 0))) && (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL)) precedence=MultiplyPrecedence; break; } case '*': case '/': case '%': { precedence=MultiplyPrecedence; break; } case '+': case '-': { if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) || (isalpha(c) != 0)) precedence=AdditionPrecedence; break; } case BitwiseAndAssignmentOperator: case BitwiseOrAssignmentOperator: case LeftShiftAssignmentOperator: case RightShiftAssignmentOperator: case PowerAssignmentOperator: case ModuloAssignmentOperator: case PlusAssignmentOperator: case SubtractAssignmentOperator: case MultiplyAssignmentOperator: case DivideAssignmentOperator: case IncrementAssignmentOperator: case DecrementAssignmentOperator: { precedence=AssignmentPrecedence; break; } case LeftShiftOperator: case RightShiftOperator: { precedence=ShiftPrecedence; break; } case '<': case LessThanEqualOperator: case GreaterThanEqualOperator: case '>': { precedence=RelationalPrecedence; break; } case EqualOperator: case NotEqualOperator: { precedence=EquivalencyPrecedence; break; } case '&': { precedence=BitwiseAndPrecedence; break; } case '|': { precedence=BitwiseOrPrecedence; break; } case LogicalAndOperator: { precedence=LogicalAndPrecedence; break; } case LogicalOrOperator: { precedence=LogicalOrPrecedence; break; } case ExponentialNotation: { precedence=ExponentialNotationPrecedence; break; } case ':': case '?': { precedence=TernaryPrecedence; break; } case '=': { precedence=AssignmentPrecedence; break; } case ',': { precedence=CommaPrecedence; break; } case ';': { precedence=SeparatorPrecedence; break; } } if ((precedence == BitwiseComplementPrecedence) || (precedence == 
TernaryPrecedence) || (precedence == AssignmentPrecedence)) { if (precedence > target) { /* Right-to-left associativity. */ target=precedence; subexpression=expression; } } else if (precedence >= target) { /* Left-to-right associativity. */ target=precedence; subexpression=expression; } if (strchr("(",(int) *expression) != (char *) NULL) expression=FxSubexpression(expression,exception); c=(int) (*expression++); } return(subexpression); } static double FxEvaluateSubexpression(FxInfo *fx_info, const PixelChannel channel,const ssize_t x,const ssize_t y, const char *expression,const size_t depth,double *beta, ExceptionInfo *exception) { #define FxMaxParenthesisDepth 58 #define FxMaxSubexpressionDepth 200 #define FxReturn(value) \ { \ subexpression=DestroyString(subexpression); \ return(value); \ } char *q, *subexpression; double alpha, gamma, sans, value; register const char *p; *beta=0.0; sans=0.0; subexpression=AcquireString(expression); *subexpression='\0'; if (depth > FxMaxSubexpressionDepth) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",expression); FxReturn(0.0); } if (exception->severity >= ErrorException) FxReturn(0.0); while (isspace((int) ((unsigned char) *expression)) != 0) expression++; if (*expression == '\0') FxReturn(0.0); p=FxOperatorPrecedence(expression,exception); if (p != (const char *) NULL) { (void) CopyMagickString(subexpression,expression,(size_t) (p-expression+1)); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); switch ((unsigned char) *p) { case '~': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) (~(size_t) *beta); FxReturn(*beta); } case '!': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta == 0.0 ? 
1.0 : 0.0); } case '^': { *beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p, depth+1,beta,exception)); FxReturn(*beta); } case '*': case ExponentialNotation: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha*(*beta)); } case '/': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(PerceptibleReciprocal(*beta)*alpha); } case '%': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fmod(alpha,*beta)); } case '+': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha+(*beta)); } case '-': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha-(*beta)); } case BitwiseAndAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(double) ((size_t) (alpha+0.5) & (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case BitwiseOrAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(double) ((size_t) (alpha+0.5) | (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case LeftShiftAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) 
ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (*beta+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } value=(double) ((size_t) (alpha+0.5) << (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case RightShiftAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (*beta+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } value=(double) ((size_t) (alpha+0.5) >> (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case PowerAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=pow(alpha,*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case ModuloAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), 
OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=fmod(alpha,*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case PlusAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha+(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case SubtractAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha-(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case MultiplyAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha*(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case DivideAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), 
OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha*PerceptibleReciprocal(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case IncrementAssignmentOperator: { if (*subexpression == '\0') alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha+1.0; if (*subexpression == '\0') { if (SetFxSymbolValue(fx_info,p,value) == MagickFalse) return(0.0); } else if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case DecrementAssignmentOperator: { if (*subexpression == '\0') alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha-1.0; if (*subexpression == '\0') { if (SetFxSymbolValue(fx_info,p,value) == MagickFalse) return(0.0); } else if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case LeftShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5)); FxReturn(*beta); } case RightShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5)); FxReturn(*beta); } case '<': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha < *beta ? 
1.0 : 0.0); } case LessThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha >= *beta ? 1.0 : 0.0); } case EqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5)); FxReturn(*beta); } case '|': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5)); FxReturn(*beta); } case LogicalAndOperator: { p++; if (alpha <= 0.0) { *beta=0.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 1.0 : 0.0; FxReturn(*beta); } case LogicalOrOperator: { p++; if (alpha > 0.0) { *beta=1.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 
1.0 : 0.0; FxReturn(*beta); } case '?': { (void) CopyMagickString(subexpression,++p,MagickPathExtent); p=subexpression; for (q=(char *) p; (*q != ':') && (*q != '\0'); q++) if (*q == '(') for ( ; (*q != ')') && (*q != '\0'); q++); if (*q == '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } *q='\0'; if (fabs(alpha) >= MagickEpsilon) gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); FxReturn(gamma); } case '=': { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case ',': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha); } case ';': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta); } default: { gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1, beta,exception); FxReturn(gamma); } } } if (strchr("(",(int) *expression) != (char *) NULL) { size_t length; if (depth >= FxMaxParenthesisDepth) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "ParenthesisNestedTooDeeply","`%s'",expression); length=CopyMagickString(subexpression,expression+1,MagickPathExtent); if (length != 0) subexpression[length-1]='\0'; gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); FxReturn(gamma); } switch (*expression) { case '+': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); 
FxReturn(1.0*gamma); } case '-': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(-1.0*gamma); } case '~': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn((double) (~(size_t) (gamma+0.5))); } case 'A': case 'a': { if (IsFxFunction(expression,"abs",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(fabs(alpha)); } #if defined(MAGICKCORE_HAVE_ACOSH) if (IsFxFunction(expression,"acosh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(acosh(alpha)); } #endif if (IsFxFunction(expression,"acos",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(acos(alpha)); } #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"airy",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha); FxReturn(gamma*gamma); } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (IsFxFunction(expression,"asinh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(asinh(alpha)); } #endif if (IsFxFunction(expression,"asin",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(asin(alpha)); } if (IsFxFunction(expression,"alt",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(((ssize_t) alpha) & 0x01 ? 
-1.0 : 1.0); } if (IsFxFunction(expression,"atan2",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atan2(alpha,*beta)); } #if defined(MAGICKCORE_HAVE_ATANH) if (IsFxFunction(expression,"atanh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atanh(alpha)); } #endif if (IsFxFunction(expression,"atan",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(atan(alpha)); } if (LocaleCompare(expression,"a") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'B': case 'b': { if (LocaleCompare(expression,"b") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'C': case 'c': { if (IsFxFunction(expression,"ceil",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(ceil(alpha)); } if (IsFxFunction(expression,"clamp",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha < 0.0) FxReturn(0.0); if (alpha > 1.0) FxReturn(1.0); FxReturn(alpha); } if (IsFxFunction(expression,"cosh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(cosh(alpha)); } if (IsFxFunction(expression,"cos",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(cos(alpha)); } if (LocaleCompare(expression,"c") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'D': case 'd': { if (IsFxFunction(expression,"debug",5) != MagickFalse) { const char *type; size_t length; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); switch (fx_info->images->colorspace) { case CMYKColorspace: { switch 
(channel) { case CyanPixelChannel: type="cyan"; break; case MagentaPixelChannel: type="magenta"; break; case YellowPixelChannel: type="yellow"; break; case AlphaPixelChannel: type="alpha"; break; case BlackPixelChannel: type="black"; break; default: type="unknown"; break; } break; } case GRAYColorspace: { switch (channel) { case RedPixelChannel: type="gray"; break; case AlphaPixelChannel: type="alpha"; break; default: type="unknown"; break; } break; } default: { switch (channel) { case RedPixelChannel: type="red"; break; case GreenPixelChannel: type="green"; break; case BluePixelChannel: type="blue"; break; case AlphaPixelChannel: type="alpha"; break; default: type="unknown"; break; } break; } } *subexpression='\0'; length=1; if (strlen(expression) > 6) length=CopyMagickString(subexpression,expression+6, MagickPathExtent); if (length != 0) subexpression[length-1]='\0'; if (fx_info->file != (FILE *) NULL) (void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: " "%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type, subexpression,GetMagickPrecision(),alpha); FxReturn(alpha); } if (IsFxFunction(expression,"do",2) != MagickFalse) { size_t length; /* Parse do(expression,condition test). 
*/ length=CopyMagickString(subexpression,expression+3,MagickPathExtent); if (length != 0) subexpression[length-1]='\0'; p=subexpression; for (q=(char *) p; (*q != ',') && (*q != '\0'); q++) if (*q == '(') for ( ; (*q != ')') && (*q != '\0'); q++); if (*q == '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } for (*q='\0'; ; ) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; } FxReturn(alpha); } if (IsFxFunction(expression,"drc",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((alpha/(*beta*(alpha-1.0)+1.0))); } break; } case 'E': case 'e': { if (LocaleCompare(expression,"epsilon") == 0) FxReturn(MagickEpsilon); #if defined(MAGICKCORE_HAVE_ERF) if (IsFxFunction(expression,"erf",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(erf(alpha)); } #endif if (IsFxFunction(expression,"exp",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(exp(alpha)); } if (LocaleCompare(expression,"e") == 0) FxReturn(2.7182818284590452354); break; } case 'F': case 'f': { if (IsFxFunction(expression,"floor",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression,"for",3) != MagickFalse) { double sans = 0.0; size_t length; /* Parse for(initialization, condition test, expression). 
*/ length=CopyMagickString(subexpression,expression+4,MagickPathExtent); if (length != 0) subexpression[length-1]='\0'; p=subexpression; for (q=(char *) p; (*q != ',') && (*q != '\0'); q++) if (*q == '(') for ( ; (*q != ')') && (*q != '\0'); q++); if (*q == '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); (void) CopyMagickString(subexpression,q+1,MagickPathExtent); p=subexpression; for (q=(char *) p; (*q != ',') && (*q != '\0'); q++) if (*q == '(') for ( ; (*q != ')') && (*q != '\0'); q++); if (*q == '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } for (*q='\0'; ; ) { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); } FxReturn(alpha); } break; } case 'G': case 'g': { if (IsFxFunction(expression,"gauss",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI)); } if (IsFxFunction(expression,"gcd",3) != MagickFalse) { MagickOffsetType gcd; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+ 0.5)); FxReturn((double) gcd); } if (LocaleCompare(expression,"g") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'H': case 'h': { if (LocaleCompare(expression,"h") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (LocaleCompare(expression,"hue") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"hypot",5) != MagickFalse) { 
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(hypot(alpha,*beta)); } break; } case 'K': case 'k': { if (LocaleCompare(expression,"k") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'I': case 'i': { if (IsFxFunction(expression,"if",2) != MagickFalse) { double sans = 0.0; size_t length; length=CopyMagickString(subexpression,expression+3,MagickPathExtent); if (length != 0) subexpression[length-1]='\0'; p=subexpression; for (q=(char *) p; (*q != ',') && (*q != '\0'); q++) if (*q == '(') for ( ; (*q != ')') && (*q != '\0'); q++); if (*q == '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); (void) CopyMagickString(subexpression,q+1,MagickPathExtent); p=subexpression; for (q=(char *) p; (*q != ',') && (*q != '\0'); q++) if (*q == '(') for ( ; (*q != ')') && (*q != '\0'); q++); if (*q == '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } *q='\0'; if (fabs(alpha) >= MagickEpsilon) alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); FxReturn(alpha); } if (LocaleCompare(expression,"intensity") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"int",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression,"isnan",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn((double) !!IsNaN(alpha)); } if (LocaleCompare(expression,"i") == 0) 
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'J': case 'j': { if (LocaleCompare(expression,"j") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); #if defined(MAGICKCORE_HAVE_J0) if (IsFxFunction(expression,"j0",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j0(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"j1",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j1(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"jinc",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); FxReturn((2.0*j1((MagickPI*alpha))/(MagickPI*alpha))); } #endif break; } case 'L': case 'l': { if (IsFxFunction(expression,"ln",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(log(alpha)); } if (IsFxFunction(expression,"logtwo",6) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn(log10(alpha)/log10(2.0)); } if (IsFxFunction(expression,"log",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(log10(alpha)); } if (LocaleCompare(expression,"lightness") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'M': case 'm': { if (LocaleCompare(expression,"MaxRGB") == 0) FxReturn(QuantumRange); if (LocaleNCompare(expression,"maxima",6) == 0) break; if (IsFxFunction(expression,"max",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha > *beta ? 
alpha : *beta); } if (LocaleNCompare(expression,"minima",6) == 0) break; if (IsFxFunction(expression,"min",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha < *beta ? alpha : *beta); } if (IsFxFunction(expression,"mod",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta)); } if (LocaleCompare(expression,"m") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'N': case 'n': { if (IsFxFunction(expression,"not",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((double) (alpha < MagickEpsilon)); } if (LocaleCompare(expression,"n") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'O': case 'o': { if (LocaleCompare(expression,"Opaque") == 0) FxReturn(1.0); if (LocaleCompare(expression,"o") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'P': case 'p': { if (LocaleCompare(expression,"phi") == 0) FxReturn(MagickPHI); if (LocaleCompare(expression,"pi") == 0) FxReturn(MagickPI); if (IsFxFunction(expression,"pow",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(pow(alpha,*beta)); } if (LocaleCompare(expression,"p") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Q': case 'q': { if (LocaleCompare(expression,"QuantumRange") == 0) FxReturn(QuantumRange); if (LocaleCompare(expression,"QuantumScale") == 0) FxReturn(QuantumScale); break; } case 'R': case 'r': { if (IsFxFunction(expression,"rand",4) != MagickFalse) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxEvaluateSubexpression) #endif alpha=GetPseudoRandomValue(fx_info->random_info); 
FxReturn(alpha); } if (IsFxFunction(expression,"round",5) != MagickFalse) { /* Round the fraction to nearest integer. */ alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if ((alpha-floor(alpha)) < (ceil(alpha)-alpha)) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression,"r") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'S': case 's': { if (LocaleCompare(expression,"saturation") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"sign",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(alpha < 0.0 ? -1.0 : 1.0); } if (IsFxFunction(expression,"sinc",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0) FxReturn(1.0); FxReturn(sin((MagickPI*alpha))/(MagickPI*alpha)); } if (IsFxFunction(expression,"sinh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sinh(alpha)); } if (IsFxFunction(expression,"sin",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(sin(alpha)); } if (IsFxFunction(expression,"sqrt",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sqrt(alpha)); } if (IsFxFunction(expression,"squish",6) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn((1.0/(1.0+exp(-alpha)))); } if (LocaleCompare(expression,"s") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'T': case 't': { if (IsFxFunction(expression,"tanh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(tanh(alpha)); } if 
(IsFxFunction(expression,"tan",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(tan(alpha)); } if (LocaleCompare(expression,"Transparent") == 0) FxReturn(0.0); if (IsFxFunction(expression,"trunc",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha >= 0.0) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression,"t") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'U': case 'u': { if (LocaleCompare(expression,"u") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'V': case 'v': { if (LocaleCompare(expression,"v") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'W': case 'w': { if (IsFxFunction(expression,"while",5) != MagickFalse) { size_t length; /* Parse while(condition test, expression). */ length=CopyMagickString(subexpression,expression+6,MagickPathExtent); if (length != 0) subexpression[length-1]='\0'; p=subexpression; for (q=(char *) p; (*q != ',') && (*q != '\0'); q++) if (*q == '(') for ( ; (*q != ')') && (*q != '\0'); q++); if (*q == '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } for (*q='\0'; ; ) { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); } FxReturn(alpha); } if (LocaleCompare(expression,"w") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Y': case 'y': { if (LocaleCompare(expression,"y") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Z': case 'z': { if (LocaleCompare(expression,"z") == 0) 
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    default:
      break;
  }
  /*
    No operator or named function matched.  Free the scratch buffer, then
    try to read the token as a number with an SI prefix; if nothing was
    consumed (q unchanged), fall back to a symbol-table lookup.
  */
  subexpression=DestroyString(subexpression);
  q=(char *) expression;
  alpha=InterpretSiPrefixValue(expression,&q);
  if (q == expression)
    alpha=FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception);
  FxReturn(alpha);
}

/*
  FxEvaluateExpression() evaluates fx_info's expression for the gray channel
  of pixel (0,0) and stores the result in *alpha.
*/
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  return(status);
}

/*
  FxPreprocessExpression() evaluates the expression once with debug output
  suppressed (fx_info->file is temporarily cleared and restored afterwards),
  so parse errors surface before the per-pixel evaluation loop runs.
*/
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *file;

  MagickBooleanType
    status;

  file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  fx_info->file=file;
  return(status);
}

/*
  FxEvaluateChannelExpression() evaluates fx_info->expression for the given
  channel at pixel (x,y) and stores the result in *alpha.  Returns
  MagickFalse when an OptionError was recorded in 'exception' during
  evaluation, MagickTrue otherwise.
*/
MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  double *alpha,ExceptionInfo *exception)
{
  double
    beta;

  beta=0.0;
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0,
    &beta,exception);
  return(exception->severity == OptionError ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F x I m a g e                                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxImage() applies a mathematical expression to the specified image.
%
%  The format of the FxImage method is:
%
%      Image *FxImage(const Image *image,const char *expression,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o expression: A mathematical expression.
%
%    o exception: return any errors or warnings in this structure.
% */ static FxInfo **DestroyFxThreadSet(FxInfo **fx_info) { register ssize_t i; assert(fx_info != (FxInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (fx_info[i] != (FxInfo *) NULL) fx_info[i]=DestroyFxInfo(fx_info[i]); fx_info=(FxInfo **) RelinquishMagickMemory(fx_info); return(fx_info); } static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression, ExceptionInfo *exception) { char *fx_expression; double alpha; FxInfo **fx_info; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info)); if (fx_info == (FxInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return((FxInfo **) NULL); } (void) memset(fx_info,0,number_threads*sizeof(*fx_info)); if (*expression != '@') fx_expression=ConstantString(expression); else fx_expression=FileToString(expression+1,~0UL,exception); for (i=0; i < (ssize_t) number_threads; i++) { MagickBooleanType status; fx_info[i]=AcquireFxInfo(image,fx_expression,exception); if (fx_info[i] == (FxInfo *) NULL) break; status=FxPreprocessExpression(fx_info[i],&alpha,exception); if (status == MagickFalse) break; } fx_expression=DestroyString(fx_expression); if (i < (ssize_t) number_threads) fx_info=DestroyFxThreadSet(fx_info); return(fx_info); } MagickExport Image *FxImage(const Image *image,const char *expression, ExceptionInfo *exception) { #define FxImageTag "Fx/Image" CacheView *fx_view, *image_view; FxInfo **magick_restrict fx_info; Image *fx_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (expression == (const char *) NULL) 
return(CloneImage(image,0,0,MagickTrue,exception)); fx_info=AcquireFxThreadSet(image,expression,exception); if (fx_info == (FxInfo **) NULL) return((Image *) NULL); fx_image=CloneImage(image,0,0,MagickTrue,exception); if (fx_image == (Image *) NULL) { fx_info=DestroyFxThreadSet(fx_info); return((Image *) NULL); } if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse) { fx_info=DestroyFxThreadSet(fx_info); fx_image=DestroyImage(fx_image); return((Image *) NULL); } /* Fx image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); fx_view=AcquireAuthenticCacheView(fx_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(progress,status) \ magick_number_threads(image,fx_image,fx_image->rows,1) #endif for (y=0; y < (ssize_t) fx_image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) fx_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel); if ((traits == UndefinedPixelTrait) || (fx_traits == UndefinedPixelTrait)) continue; if ((fx_traits & CopyPixelTrait) != 0) { SetPixelChannel(fx_image,channel,p[i],q); continue; } alpha=0.0; (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha, exception); q[i]=ClampToQuantum(QuantumRange*alpha); } p+=GetPixelChannels(image); q+=GetPixelChannels(fx_image); } if 
(SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,FxImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } fx_view=DestroyCacheView(fx_view); image_view=DestroyCacheView(image_view); fx_info=DestroyFxThreadSet(fx_info); if (status == MagickFalse) fx_image=DestroyImage(fx_image); return(fx_image); }
fft-cuda.c
/* Copyright 2013, 2015. The Regents of the University of California.
 * Copyright 2019. Martin Uecker.
 * All rights reserved. Use of this source code is governed by
 * a BSD-style license which can be found in the LICENSE file.
 *
 * Authors:
 * 2012-2019 Martin Uecker <martin.uecker@med.uni-goettingen.de>
 *
 *
 * Internal interface to the CUFFT library used in fft.c.
 */

#include <stdbool.h>
#include <complex.h>
#include <assert.h>

#include "misc/misc.h"
#include "num/multind.h"

#include "fft-cuda.h"

#ifdef USE_CUDA
#include <cufft.h>
#include "num/gpuops.h"

#ifndef CFL_SIZE
#define CFL_SIZE sizeof(complex float)
#endif

/* A (possibly chained) CUFFT plan.  'chain' covers dimensions that could
 * not be folded into a single cufftPlanMany() call; 'batch'/'idist'/'odist'
 * describe outer batch loops executed one cufftExecC2C() at a time. */
struct fft_cuda_plan_s {

	cufftHandle cufft;
	struct fft_cuda_plan_s* chain;

	bool backwards;

	long batch;
	long idist;
	long odist;
};

/* one dimension: length and input/output strides (in elements) */
struct iovec {

	long n;
	long is;
	long os;
};

/* Try to build a single cufftPlanMany() plan for the dimensions selected in
 * 'flags'.  Returns NULL when the layout cannot be expressed in the CUFFT
 * advanced interface (more than 3 transformed dims, or batch dims that do
 * not collapse). */
static struct fft_cuda_plan_s* fft_cuda_plan0(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], const long istrides[D], bool backwards)
{
	PTR_ALLOC(struct fft_cuda_plan_s, plan);
	unsigned int N = D;

	plan->batch = 1;
	plan->odist = 0;
	plan->idist = 0;
	plan->backwards = backwards;
	plan->chain = NULL;

	struct iovec dims[N];
	struct iovec hmdims[N];

	assert(0 != flags);

	// the cufft interface is strange, but we do our best...
	/* split non-singleton dims into transformed (dims) and batch (hmdims) */
	unsigned int k = 0;
	unsigned int l = 0;

	for (unsigned int i = 0; i < N; i++) {

		if (1 == dimensions[i])
			continue;

		if (MD_IS_SET(flags, i)) {

			dims[k].n = dimensions[i];
			dims[k].is = istrides[i] / CFL_SIZE;	/* strides in elements, not bytes */
			dims[k].os = ostrides[i] / CFL_SIZE;
			k++;

		} else {

			hmdims[l].n = dimensions[i];
			hmdims[l].is = istrides[i] / CFL_SIZE;
			hmdims[l].os = ostrides[i] / CFL_SIZE;
			l++;
		}
	}

	assert(k > 0);

	int cudims[k];
	int cuiemb[k];
	int cuoemb[k];

	long batchdims[l];
	long batchistr[l];
	long batchostr[l];

	int lis = dims[0].is;
	int los = dims[0].os;

	if (k > 3)	/* cufftPlanMany supports at most rank-3 transforms */
		goto errout;

	for (unsigned int i = 0; i < k; i++) {

		// assert(dims[i].is == lis);
		// assert(dims[i].os == los);

		/* CUFFT orders dimensions outermost-first */
		cudims[k - 1 - i] = dims[i].n;
		cuiemb[k - 1 - i] = dims[i].n;
		cuoemb[k - 1 - i] = dims[i].n;

		lis = dims[i].n * dims[i].is;
		los = dims[i].n * dims[i].os;
	}

	for (unsigned int i = 0; i < l; i++) {

		batchdims[i] = hmdims[i].n;
		batchistr[i] = hmdims[i].is;
		batchostr[i] = hmdims[i].os;
	}

	int istride = dims[0].is;
	int ostride = dims[0].os;
	int idist = lis;
	int odist = los;
	int cubs = 1;

	// check that batch dimensions can be collapsed to one
	/* NOTE(review): if l == 0 (every non-singleton dim transformed),
	   hmdims[0] is read uninitialized here -- confirm callers guarantee
	   at least one batch dimension, or that md_calc_blockdim(0, ...)
	   ignores the last argument. */
	unsigned int bi = md_calc_blockdim(l, batchdims, batchistr, hmdims[0].is);
	unsigned int bo = md_calc_blockdim(l, batchdims, batchostr, hmdims[0].os);

	if (bi != bo)
		goto errout;

	if (bi > 0) {

		/* first bi batch dims are contiguous: fold into cufft's batch */
		idist = hmdims[0].is;
		odist = hmdims[0].os;
		cubs = md_calc_size(bi, batchdims);
	}

	if (l != bi) {

		// check that batch dimensions can be collapsed to one
		if (l - bi != md_calc_blockdim(l - bi, batchdims + bi, batchistr + bi, hmdims[bi].is))
			goto errout;

		if (l - bo != md_calc_blockdim(l - bo, batchdims + bo, batchostr + bo, hmdims[bo].os))
			goto errout;

		/* remaining batch dims are looped over in fft_cuda_exec() */
		plan->idist = hmdims[bi].is;
		plan->odist = hmdims[bo].os;
		plan->batch = md_calc_size(l - bi, batchdims + bi);
	}

	assert(k <= 3);

	int err;
	/* cufftPlanMany is not thread-safe to call concurrently */
	#pragma omp critical
	err = cufftPlanMany(&plan->cufft, k,
				cudims, cuiemb, istride, idist,
					cuoemb, ostride, odist, CUFFT_C2C, cubs);

	if (CUFFT_SUCCESS != err)
		goto errout;

	return PTR_PASS(plan);

errout:
	PTR_FREE(plan);
	return
NULL; } struct fft_cuda_plan_s* fft_cuda_plan(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], const long istrides[D], bool backwards) { struct fft_cuda_plan_s* plan = fft_cuda_plan0(D, dimensions, flags, ostrides, istrides, backwards); if (NULL != plan) return plan; int lsb = ffs(flags) - 1; if (flags & lsb) { // FIXME: this couldbe better... struct fft_cuda_plan_s* plan = fft_cuda_plan0(D, dimensions, lsb, ostrides, istrides, backwards); if (NULL == plan) return NULL; plan->chain = fft_cuda_plan(D, dimensions, MD_CLEAR(flags, lsb), ostrides, ostrides, backwards); if (NULL == plan->chain) { fft_cuda_free_plan(plan); return NULL; } return plan; } return NULL; } void fft_cuda_free_plan(struct fft_cuda_plan_s* cuplan) { if (NULL != cuplan->chain) fft_cuda_free_plan(cuplan->chain); cufftDestroy(cuplan->cufft); xfree(cuplan); } void fft_cuda_exec(struct fft_cuda_plan_s* cuplan, complex float* dst, const complex float* src) { assert(cuda_ondevice(src)); assert(cuda_ondevice(dst)); assert(NULL != cuplan); int err; for (int i = 0; i < cuplan->batch; i++) { if (CUFFT_SUCCESS != (err = cufftExecC2C(cuplan->cufft, (cufftComplex*)src + i * cuplan->idist, (cufftComplex*)dst + i * cuplan->odist, (!cuplan->backwards) ? CUFFT_FORWARD : CUFFT_INVERSE))) error("CUFFT: %d\n", err); } if (NULL != cuplan->chain) fft_cuda_exec(cuplan->chain, dst, dst); } #endif
ams.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_parcsr_ls.h"
#include "float.h"
#include "ams.h"
#include "_hypre_utilities.hpp"

/*--------------------------------------------------------------------------
 * hypre_ParCSRRelax
 *
 * Relaxation on the ParCSR matrix A with right-hand side f and
 * initial guess u. Possible values for relax_type are:
 *
 * 1 = l1-scaled (or weighted) Jacobi
 * 2 = l1-scaled block Gauss-Seidel/SSOR
 * 3 = Kaczmarz
 * 4 = truncated version of 2 (Remark 6.2 in smoothers paper)
 * x = BoomerAMG relaxation with relax_type = |x|
 * (16 = Cheby)
 *
 * The default value of relax_type is 2.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRRelax(hypre_ParCSRMatrix *A,           /* matrix to relax with */
                            hypre_ParVector    *f,           /* right-hand side */
                            HYPRE_Int           relax_type,  /* relaxation type */
                            HYPRE_Int           relax_times, /* number of sweeps */
                            HYPRE_Real         *l1_norms,    /* l1 norms of the rows of A */
                            HYPRE_Real          relax_weight, /* damping coefficient (usually <= 1) */
                            HYPRE_Real          omega,       /* SOR parameter (usually in (0,2) */
                            HYPRE_Real          max_eig_est, /* for cheby smoothers */
                            HYPRE_Real          min_eig_est,
                            HYPRE_Int           cheby_order,
                            HYPRE_Real          cheby_fraction,
                            hypre_ParVector    *u,           /* initial/updated approximation */
                            hypre_ParVector    *v,           /* temporary vector */
                            hypre_ParVector    *z            /* temporary vector */)
{
   HYPRE_Int sweep;

   for (sweep = 0; sweep < relax_times; sweep++)
   {
      if (relax_type == 1) /* l1-scaled Jacobi */
      {
         hypre_BoomerAMGRelax(A, f, NULL, 7, 0, relax_weight, 1.0, l1_norms, u, v, z);
      }
      else if (relax_type == 2 || relax_type == 4) /* offd-l1-scaled block GS */
      {
         /* !!! Note: relax_weight and omega flipped !!! */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         /* dispatch on where the matrix data lives (host vs. device) */
         HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

         if (exec == HYPRE_EXEC_DEVICE)
         {
            hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, NULL, 0, omega, relax_weight,
                                                        l1_norms, u, v, z, 1, 1 /* symm */);
         }
         else
#endif
         {
            hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, NULL, 0, omega, relax_weight,
                                                       l1_norms, u, v, z, 1, 1 /* symm */,
                                                       0 /* skip diag */, 1, 0);
         }
      }
      else if (relax_type == 3) /* Kaczmarz */
      {
         hypre_BoomerAMGRelax(A, f, NULL, 20, 0, relax_weight, omega, l1_norms, u, v, z);
      }
      else /* call BoomerAMG relaxation */
      {
         if (relax_type == 16)
         {
            hypre_ParCSRRelax_Cheby(A, f, max_eig_est, min_eig_est, cheby_fraction, cheby_order,
                                    1, 0, u, v, z);
         }
         else
         {
            hypre_BoomerAMGRelax(A, f, NULL, hypre_abs(relax_type), 0, relax_weight, omega,
                                 l1_norms, u, v, z);
         }
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorInRangeOf
 *
 * Return a vector that belongs to the range of a given matrix.
 * The caller owns the data; ownership of the partitioning stays with A.
 *--------------------------------------------------------------------------*/

hypre_ParVector *hypre_ParVectorInRangeOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *x;

   x = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumRows(A),
                             hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(x);

   hypre_ParVectorOwnsData(x) = 1;
   hypre_ParVectorOwnsPartitioning(x) = 0;

   return x;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorInDomainOf
 *
 * Return a vector that belongs to the domain of a given matrix.
 *--------------------------------------------------------------------------*/

hypre_ParVector *hypre_ParVectorInDomainOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *x;

   /* sized by the global number of COLUMNS, i.e. the domain of A;
      owns its data but not its partitioning — caller must destroy */
   x = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumCols(A),
                             hypre_ParCSRMatrixColStarts(A));
   hypre_ParVectorInitialize(x);
   hypre_ParVectorOwnsData(x) = 1;
   hypre_ParVectorOwnsPartitioning(x) = 0;

   return x;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorBlockSplit
 *
 * Extract the dim sub-vectors x_0,...,x_{dim-1} composing a parallel
 * block vector x. It is assumed that &x[i] = [x_0[i],...,x_{dim-1}[i]].
 *--------------------------------------------------------------------------*/

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* One thread per (node, component) pair. dir == 0 scatters the interleaved
 * vector x into components x0..x2 (split); dir == 1 gathers them back into
 * x (gather). dir is a template parameter so the branch is compile-time. */
template<HYPRE_Int dir>
__global__ void
hypreCUDAKernel_ParVectorBlockSplitGather(HYPRE_Int size, HYPRE_Int dim, HYPRE_Real *x0,
                                          HYPRE_Real *x1, HYPRE_Real *x2, HYPRE_Real *x)
{
   const HYPRE_Int i = hypre_cuda_get_grid_thread_id<1,1>();

   if (i >= size * dim)
   {
      return;
   }

   HYPRE_Real *xx[3];
   xx[0] = x0;
   xx[1] = x1;
   xx[2] = x2;

   const HYPRE_Int d = i % dim;   /* component index */
   const HYPRE_Int k = i / dim;   /* node index */

   if (dir == 0)
   {
      xx[d][k] = x[i];
   }
   else if (dir == 1)
   {
      x[i] = xx[d][k];
   }
}
#endif

HYPRE_Int
hypre_ParVectorBlockSplit(hypre_ParVector *x,
                          hypre_ParVector *x_[3],
                          HYPRE_Int dim)
{
   HYPRE_Int i, d, size_;
   HYPRE_Real *x_data, *x_data_[3];

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParVectorMemoryLocation(x) );
#endif

   /* component size is taken from the first sub-vector */
   size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));

   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
   for (d = 0; d < dim; d++)
      x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (exec == HYPRE_EXEC_DEVICE)
   {
      dim3 bDim = hypre_GetDefaultCUDABlockDimension();
      dim3 gDim = hypre_GetDefaultCUDAGridDimension(size_ * dim, "thread", bDim);
      HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ParVectorBlockSplitGather<0>, gDim, bDim,
                         size_, dim, x_data_[0], x_data_[1], x_data_[2], x_data);
   }
   else
#endif
   {
      /* host path: de-interleave */
      for (i = 0; i < size_; i++)
         for (d = 0; d < dim; d++)
            x_data_[d][i] = x_data[dim*i+d];
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorBlockGather
 *
 * Compose a parallel block vector x from dim given sub-vectors
 * x_0,...,x_{dim-1}, such that &x[i] = [x_0[i],...,x_{dim-1}[i]].
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParVectorBlockGather(hypre_ParVector *x,
                           hypre_ParVector *x_[3],
                           HYPRE_Int dim)
{
   HYPRE_Int i, d, size_;
   HYPRE_Real *x_data, *x_data_[3];

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParVectorMemoryLocation(x) );
#endif

   size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));

   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
   for (d = 0; d < dim; d++)
      x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (exec == HYPRE_EXEC_DEVICE)
   {
      dim3 bDim = hypre_GetDefaultCUDABlockDimension();
      dim3 gDim = hypre_GetDefaultCUDAGridDimension(size_ * dim, "thread", bDim);
      /* dir == 1: gather components back into the interleaved vector */
      HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ParVectorBlockSplitGather<1>, gDim, bDim,
                         size_, dim, x_data_[0], x_data_[1], x_data_[2], x_data);
   }
   else
#endif
   {
      /* host path: re-interleave */
      for (i = 0; i < size_; i++)
         for (d = 0; d < dim; d++)
            x_data[dim*i+d] = x_data_[d][i];
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGBlockSolve
 *
 * Apply the block-diagonal solver diag(B) to the system diag(A) x = b.
 * Here B is a given BoomerAMG solver for A, while x and b are "block"
 * parallel vectors.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_BoomerAMGBlockSolve(void *B,
                                    hypre_ParCSRMatrix *A,
                                    hypre_ParVector *b,
                                    hypre_ParVector *x)
{
   HYPRE_Int d, dim = 1;

   hypre_ParVector *b_[3];
   hypre_ParVector *x_[3];

   /* number of components per node, deduced from the global sizes */
   dim = hypre_ParVectorGlobalSize(x) / hypre_ParCSRMatrixGlobalNumRows(A);

   if (dim == 1)
   {
      /* scalar case: solve directly */
      hypre_BoomerAMGSolve(B, A, b, x);
      return hypre_error_flag;
   }

   for (d = 0; d < dim; d++)
   {
      b_[d] = hypre_ParVectorInRangeOf(A);
      x_[d] = hypre_ParVectorInRangeOf(A);
   }

   hypre_ParVectorBlockSplit(b, b_, dim);
   hypre_ParVectorBlockSplit(x, x_, dim);

   /* one solve per component with the same solver B */
   for (d = 0; d < dim; d++)
      hypre_BoomerAMGSolve(B, A, b_[d], x_[d]);

   hypre_ParVectorBlockGather(x, x_, dim);

   for (d = 0; d < dim; d++)
   {
      hypre_ParVectorDestroy(b_[d]);
      hypre_ParVectorDestroy(x_[d]);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixFixZeroRows
 *
 * For every zero row in the matrix: set the diagonal element to 1.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRMatrixFixZeroRowsHost(hypre_ParCSRMatrix *A)
{
   HYPRE_Int i, j;
   HYPRE_Real l1_norm;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   /* a row will be considered zero if its l1 norm is less than eps */
   HYPRE_Real eps = 0.0; /* DBL_EPSILON * 1e+4; */

   for (i = 0; i < num_rows; i++)
   {
      /* l1 norm over both the local (diag) and off-processor (offd) parts */
      l1_norm = 0.0;
      for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
         l1_norm += fabs(A_diag_data[j]);
      if (num_cols_offd)
         for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
            l1_norm += fabs(A_offd_data[j]);

      if (l1_norm <= eps)
      {
         /* zero row: write 1 on the diagonal, 0 elsewhere */
         for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
            if (A_diag_J[j] == i)
               A_diag_data[j] = 1.0;
            else
               A_diag_data[j] = 0.0;
         if (num_cols_offd)
            for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
               A_offd_data[j] = 0.0;
      }
   }

   return hypre_error_flag;
}

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* One warp per row. Lanes 0/1 load the row's begin/end pointers, which are
 * then broadcast across the warp with __shfl_sync; the lanes cooperatively
 * accumulate the row's l1 norm and, if it is (numerically) zero, rewrite
 * the row as an identity row. */
__global__ void
hypreCUDAKernel_ParCSRMatrixFixZeroRows( HYPRE_Int      nrows,
                                         HYPRE_Int     *A_diag_i,
                                         HYPRE_Int     *A_diag_j,
                                         HYPRE_Complex *A_diag_data,
                                         HYPRE_Int     *A_offd_i,
                                         HYPRE_Complex *A_offd_data,
                                         HYPRE_Int      num_cols_offd)
{
   HYPRE_Int row_i = hypre_cuda_get_grid_warp_id<1,1>();

   if (row_i >= nrows)
   {
      return;
   }

   HYPRE_Int lane = hypre_cuda_get_lane_id<1>();
   HYPRE_Real eps = 0.0; /* DBL_EPSILON * 1e+4; */
   HYPRE_Real l1_norm = 0.0;
   HYPRE_Int p1, q1, p2 = 0, q2 = 0;

   if (lane < 2)
   {
      p1 = read_only_load(A_diag_i + row_i + lane);
      if (num_cols_offd)
      {
         p2 = read_only_load(A_offd_i + row_i + lane);
      }
   }

   /* broadcast row begin (lane 0) / end (lane 1) to all lanes */
   q1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 1);
   p1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 0);

   if (num_cols_offd)
   {
      q2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 1);
      p2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 0);
   }

   for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE)
   {
      l1_norm += fabs(A_diag_data[j]);
   }

   for (HYPRE_Int j = p2 + lane; j < q2; j += HYPRE_WARP_SIZE)
   {
      l1_norm += fabs(A_offd_data[j]);
   }

   l1_norm = warp_allreduce_sum(l1_norm);

   if (l1_norm <= eps)
   {
      for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE)
      {
         if (row_i == read_only_load(&A_diag_j[j]))
         {
            A_diag_data[j] = 1.0;
         }
         else
         {
            A_diag_data[j] = 0.0;
         }
      }

      for (HYPRE_Int j = p2 + lane; j < q2; j += HYPRE_WARP_SIZE)
      {
         A_offd_data[j] = 0.0;
      }
   }
}

HYPRE_Int hypre_ParCSRMatrixFixZeroRowsDevice(hypre_ParCSRMatrix *A)
{
   HYPRE_Int        nrows       = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag      = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i    = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j    = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd      = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i    = hypre_CSRMatrixI(A_offd);
   HYPRE_Int        num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   dim3 bDim, gDim;

   bDim = hypre_GetDefaultCUDABlockDimension();
   gDim = hypre_GetDefaultCUDAGridDimension(nrows, "warp", bDim);

   HYPRE_CUDA_LAUNCH(hypreCUDAKernel_ParCSRMatrixFixZeroRows, gDim, bDim,
                     nrows, A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_data,
                     num_cols_offd);

   //hypre_SyncCudaComputeStream(hypre_handle());

   return hypre_error_flag;
}
#endif

/* Dispatch to the host or device implementation based on where A lives. */
HYPRE_Int hypre_ParCSRMatrixFixZeroRows(hypre_ParCSRMatrix *A)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      return hypre_ParCSRMatrixFixZeroRowsDevice(A);
   }
   else
#endif
   {
      return hypre_ParCSRMatrixFixZeroRowsHost(A);
   }
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRComputeL1Norms
 *
 * Compute the l1 norms of the rows of a given matrix, depending on
 * the option parameter:
 *
 * option 1 = Compute the l1 norm of the rows
 * option 2 = Compute the l1 norm of the (processor) off-diagonal
 *            part of the rows plus the diagonal of A
 * option 3 = Compute the l2 norm^2 of the rows
 * option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
 *            Smoothers for Ultra-Parallel Computing"
 *
 * The above computations are done in a CF manner, whenever the provided
 * cf_marker is not NULL.
 *--------------------------------------------------------------------------*/

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* Truncation of Remark 6.2: keep the diagonal y when the row sum x does
 * not exceed (4/3)*y, otherwise keep x. */
struct l1_norm_op1 : public thrust::binary_function<HYPRE_Complex, HYPRE_Complex, HYPRE_Complex>
{
   __host__ __device__ HYPRE_Complex operator()(HYPRE_Complex &x, HYPRE_Complex &y) const
   {
      return x <= 4.0/3.0 * y ? y : x;
   }
};
#endif

HYPRE_Int hypre_ParCSRComputeL1Norms(hypre_ParCSRMatrix *A,
                                     HYPRE_Int option,
                                     HYPRE_Int *cf_marker,
                                     HYPRE_Real **l1_norm_ptr)
{
   HYPRE_Int i, j;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   HYPRE_MemoryLocation memory_location_l1 = hypre_ParCSRMatrixMemoryLocation(A);
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( memory_location_l1 );

   if (exec == HYPRE_EXEC_HOST)
   {
      /* multithreaded host runs use a dedicated implementation */
      HYPRE_Int num_threads = hypre_NumThreads();
      if (num_threads > 1)
      {
         return hypre_ParCSRComputeL1NormsThreads(A, option, num_threads, cf_marker, l1_norm_ptr);
      }
   }

   HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_l1);
   /* scratch allocations follow the execution policy, not A's location */
   HYPRE_MemoryLocation memory_location_tmp =
      exec == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE;

   HYPRE_Real *diag_tmp = NULL;

   HYPRE_Int *cf_marker_offd = NULL, *cf_marker_dev = NULL;

   /* collect the cf marker data from other procs */
   if (cf_marker != NULL)
   {
      HYPRE_Int index;
      HYPRE_Int num_sends;
      HYPRE_Int start;
      HYPRE_Int *int_buf_data = NULL;

      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;

      if (num_cols_offd)
      {
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, memory_location_tmp);
      }
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
      {
         int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                      HYPRE_MEMORY_HOST);
      }
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate_v2(11, comm_pkg, HYPRE_MEMORY_HOST, int_buf_data,
                                                    memory_location_tmp, cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

      if (exec == HYPRE_EXEC_DEVICE)
      {
         /* device runs need a device copy of the local cf marker */
         cf_marker_dev = hypre_TAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_DEVICE);
         hypre_TMemcpy(cf_marker_dev, cf_marker, HYPRE_Int, num_rows,
                       HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      }
      else
      {
         /* host runs can use the caller's array directly (not owned) */
         cf_marker_dev = cf_marker;
      }
   }

   if (option == 1)
   {
      /* Set the l1 norm of the diag part */
      hypre_CSRMatrixComputeRowSum(A_diag, cf_marker_dev, cf_marker_dev, l1_norm, 1, 1.0, "set");

      /* Add the l1 norm of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 1.0, "add");
      }
   }
   else if (option == 2)
   {
      /* Set the abs(diag) element */
      hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 1);

      /* Add the l1 norm of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 1.0, "add");
      }
   }
   else if (option == 3)
   {
      /* Set the CF l2 norm of the diag part */
      hypre_CSRMatrixComputeRowSum(A_diag, NULL, NULL, l1_norm, 2, 1.0, "set");

      /* Add the CF l2 norm of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, NULL, NULL, l1_norm, 2, 1.0, "add");
      }
   }
   else if (option == 4)
   {
      /* Set the abs(diag) element */
      hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 1);
      diag_tmp = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_tmp);
      hypre_TMemcpy(diag_tmp, l1_norm, HYPRE_Real, num_rows, memory_location_tmp, memory_location_l1);

      /* Add the scaled l1 norm of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 0.5, "add");
      }

      /* Truncate according to Remark 6.2 */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      if (exec == HYPRE_EXEC_DEVICE)
      {
         HYPRE_THRUST_CALL( transform, l1_norm, l1_norm + num_rows, diag_tmp, l1_norm, l1_norm_op1() );
      }
      else
#endif
      {
         for (i = 0; i < num_rows; i++)
         {
            if (l1_norm[i] <= 4.0/3.0 * diag_tmp[i])
            {
               l1_norm[i] = diag_tmp[i];
            }
         }
      }
   }
   else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */
   {
      /* Set the diag element */
      hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 0);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      if ( exec == HYPRE_EXEC_DEVICE)
      {
         /* replace exact zeros on the diagonal by 1.0 */
         thrust::identity<HYPRE_Complex> identity;
         HYPRE_THRUST_CALL( replace_if, l1_norm, l1_norm + num_rows, thrust::not1(identity), 1.0 );
      }
      else
#endif
      {
         for (i = 0; i < num_rows; i++)
         {
            if (l1_norm[i] == 0.0)
            {
               l1_norm[i] = 1.0;
            }
         }
      }

      /* NOTE(review): this early return skips the hypre_TFree calls at the
       * bottom — when cf_marker != NULL, cf_marker_offd and (on device) the
       * cf_marker_dev copy appear to be leaked on this path; confirm. */
      *l1_norm_ptr = l1_norm;

      return hypre_error_flag;
   }

   /* Handle negative definite matrices */
   if (!diag_tmp)
   {
      diag_tmp = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_tmp);
   }

   /* Set the diag element (signed, scale 0) */
   hypre_CSRMatrixExtractDiagonal(A_diag, diag_tmp, 0);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (exec == HYPRE_EXEC_DEVICE)
   {
      /* flip the sign of l1_norm where the diagonal is negative */
      HYPRE_THRUST_CALL( transform_if, l1_norm, l1_norm + num_rows, diag_tmp, l1_norm,
                         thrust::negate<HYPRE_Real>(), is_negative<HYPRE_Real>() );
      //bool any_zero = HYPRE_THRUST_CALL( any_of, l1_norm, l1_norm + num_rows, thrust::not1(thrust::identity<HYPRE_Complex>()) );
      /* a zero entry makes the min-reduction return 0.0 */
      bool any_zero = 0.0 == HYPRE_THRUST_CALL( reduce, l1_norm, l1_norm + num_rows, 1.0,
                                                thrust::minimum<HYPRE_Real>() );
      if ( any_zero )
      {
         hypre_error_in_arg(1);
      }
   }
   else
#endif
   {
      for (i = 0; i < num_rows; i++)
      {
         if (diag_tmp[i] < 0.0)
         {
            l1_norm[i] = -l1_norm[i];
         }
      }

      for (i = 0; i < num_rows; i++)
      {
         /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
         if (fabs(l1_norm[i]) == 0.0)
         {
            hypre_error_in_arg(1);
            break;
         }
      }
   }

   if (exec == HYPRE_EXEC_DEVICE)
   {
      /* only free when it is a device copy; on host it aliases cf_marker */
      hypre_TFree(cf_marker_dev, HYPRE_MEMORY_DEVICE);
   }
   hypre_TFree(cf_marker_offd, memory_location_tmp);
   hypre_TFree(diag_tmp, memory_location_tmp);

   *l1_norm_ptr = l1_norm;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetDiagRows
 *
 * For every row containing only a diagonal element: set it to d.
 *--------------------------------------------------------------------------*/

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* One thread per row: if the diag part has exactly one entry, that entry is
 * on the diagonal, and the offd part of the row is empty, set it to d. */
__global__ void
hypreCUDAKernel_ParCSRMatrixSetDiagRows(HYPRE_Int nrows, HYPRE_Int *A_diag_I, HYPRE_Int *A_diag_J,
                                        HYPRE_Complex *A_diag_data, HYPRE_Int *A_offd_I,
                                        HYPRE_Int num_cols_offd, HYPRE_Real d)
{
   const HYPRE_Int i = hypre_cuda_get_grid_thread_id<1,1>();

   if (i >= nrows)
   {
      return;
   }

   HYPRE_Int j = read_only_load(&A_diag_I[i]);

   if ( (read_only_load(&A_diag_I[i+1]) == j+1) &&
        (read_only_load(&A_diag_J[j]) == i) &&
        (!num_cols_offd || (read_only_load(&A_offd_I[i+1]) == read_only_load(&A_offd_I[i]))) )
   {
      A_diag_data[j] = d;
   }
}
#endif

HYPRE_Int hypre_ParCSRMatrixSetDiagRows(hypre_ParCSRMatrix *A, HYPRE_Real d)
{
   HYPRE_Int i, j;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      dim3 bDim = hypre_GetDefaultCUDABlockDimension();
      dim3 gDim = hypre_GetDefaultCUDAGridDimension(num_rows, "thread", bDim);
      HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ParCSRMatrixSetDiagRows, gDim, bDim,
                         num_rows, A_diag_I, A_diag_J, A_diag_data, A_offd_I, num_cols_offd, d);
   }
   else
#endif
   {
      for (i = 0; i < num_rows; i++)
      {
         /* diagonal-only row: single local entry on the diagonal, empty offd */
         j = A_diag_I[i];
         if ((A_diag_I[i+1] == j+1) &&
             (A_diag_J[j] == i) &&
             (!num_cols_offd || (A_offd_I[i+1] == A_offd_I[i])))
         {
            A_diag_data[j] = d;
         }
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSCreate
 *
 * Allocate the AMS solver structure.
 *--------------------------------------------------------------------------*/

void * hypre_AMSCreate()
{
   hypre_AMSData *ams_data;

   ams_data = hypre_CTAlloc(hypre_AMSData, 1, HYPRE_MEMORY_HOST);

   /* Default parameters */

   ams_data -> dim = 3;                /* 3D problem */
   ams_data -> maxit = 20;             /* perform at most 20 iterations */
   ams_data -> tol = 1e-6;             /* convergence tolerance */
   ams_data -> print_level = 1;        /* print residual norm at each step */
   ams_data -> cycle_type = 1;         /* a 3-level multiplicative solver */
   ams_data -> A_relax_type = 2;       /* offd-l1-scaled GS */
   ams_data -> A_relax_times = 1;      /* one relaxation sweep */
   ams_data -> A_relax_weight = 1.0;   /* damping parameter */
   ams_data -> A_omega = 1.0;          /* SSOR coefficient */
   ams_data -> A_cheby_order = 2;      /* Cheby: order (1-4 are valid) */
   ams_data -> A_cheby_fraction = .3;  /* Cheby: fraction of spectrum to smooth */
   ams_data -> B_G_coarsen_type = 10;  /* HMIS coarsening */
   ams_data -> B_G_agg_levels = 1;     /* Levels of aggressive coarsening */
   ams_data -> B_G_relax_type = 3;     /* hybrid G-S/Jacobi */
   ams_data -> B_G_theta = 0.25;       /* strength threshold */
   ams_data -> B_G_interp_type = 0;    /* interpolation type */
   ams_data -> B_G_Pmax = 0;           /* max nonzero elements in interp. rows */
   ams_data -> B_Pi_coarsen_type = 10; /* HMIS coarsening */
   ams_data -> B_Pi_agg_levels = 1;    /* Levels of aggressive coarsening */
   ams_data -> B_Pi_relax_type = 3;    /* hybrid G-S/Jacobi */
   ams_data -> B_Pi_theta = 0.25;      /* strength threshold */
   ams_data -> B_Pi_interp_type = 0;   /* interpolation type */
   ams_data -> B_Pi_Pmax = 0;          /* max nonzero elements in interp. rows */
   ams_data -> beta_is_zero = 0;       /* the problem has a mass term */

   /* By default, do l1-GS smoothing on the coarsest grid */
   ams_data -> B_G_coarse_relax_type  = 8;
   ams_data -> B_Pi_coarse_relax_type = 8;

   /* The rest of the fields are initialized using the Set functions */

   ams_data -> A    = NULL;
   ams_data -> G    = NULL;
   ams_data -> A_G  = NULL;
   ams_data -> B_G  = 0;
   ams_data -> Pi   = NULL;
   ams_data -> A_Pi = NULL;
   ams_data -> B_Pi = 0;

   ams_data -> x = NULL;
   ams_data -> y = NULL;
   ams_data -> z = NULL;

   ams_data -> Gx = NULL;
   ams_data -> Gy = NULL;
   ams_data -> Gz = NULL;

   ams_data -> r0 = NULL;
   ams_data -> g0 = NULL;
   ams_data -> r1 = NULL;
   ams_data -> g1 = NULL;
   ams_data -> r2 = NULL;
   ams_data -> g2 = NULL;
   ams_data -> zz = NULL;

   ams_data -> Pix = NULL;
   ams_data -> Piy = NULL;
   ams_data -> Piz = NULL;

   ams_data -> A_Pix = NULL;
   ams_data -> A_Piy = NULL;
   ams_data -> A_Piz = NULL;

   ams_data -> B_Pix = 0;
   ams_data -> B_Piy = 0;
   ams_data -> B_Piz = 0;

   ams_data -> interior_nodes = NULL;
   ams_data -> G0 = NULL;
   ams_data -> A_G0 = NULL;
   ams_data -> B_G0 = 0;
   ams_data -> projection_frequency = 5;

   ams_data -> A_l1_norms = NULL;
   ams_data -> A_max_eig_est = 0;
   ams_data -> A_min_eig_est = 0;

   /* ownership flags: AMS owns internally constructed Pi by default */
   ams_data -> owns_Pi   = 1;
   ams_data -> owns_A_G  = 0;
   ams_data -> owns_A_Pi = 0;

   return (void *) ams_data;
}

/*--------------------------------------------------------------------------
 * hypre_AMSDestroy
 *
 * Deallocate the AMS solver structure. Note that the input data (given
 * through the Set functions) is not destroyed.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSDestroy(void *solver)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   if (!ams_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* only destroy objects the solver owns (see owns_* flags) */
   if (ams_data -> owns_A_G)
      if (ams_data -> A_G)
         hypre_ParCSRMatrixDestroy(ams_data -> A_G);
   if (!ams_data -> beta_is_zero)
      if (ams_data -> B_G)
         HYPRE_BoomerAMGDestroy(ams_data -> B_G);

   if (ams_data -> owns_Pi && ams_data -> Pi)
      hypre_ParCSRMatrixDestroy(ams_data -> Pi);
   if (ams_data -> owns_A_Pi)
      if (ams_data -> A_Pi)
         hypre_ParCSRMatrixDestroy(ams_data -> A_Pi);
   if (ams_data -> B_Pi)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pi);

   if (ams_data -> owns_Pi && ams_data -> Pix)
      hypre_ParCSRMatrixDestroy(ams_data -> Pix);
   if (ams_data -> A_Pix)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Pix);
   if (ams_data -> B_Pix)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pix);
   if (ams_data -> owns_Pi && ams_data -> Piy)
      hypre_ParCSRMatrixDestroy(ams_data -> Piy);
   if (ams_data -> A_Piy)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piy);
   if (ams_data -> B_Piy)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piy);
   if (ams_data -> owns_Pi && ams_data -> Piz)
      hypre_ParCSRMatrixDestroy(ams_data -> Piz);
   if (ams_data -> A_Piz)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piz);
   if (ams_data -> B_Piz)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piz);

   /* internally created work vectors */
   if (ams_data -> r0)
      hypre_ParVectorDestroy(ams_data -> r0);
   if (ams_data -> g0)
      hypre_ParVectorDestroy(ams_data -> g0);
   if (ams_data -> r1)
      hypre_ParVectorDestroy(ams_data -> r1);
   if (ams_data -> g1)
      hypre_ParVectorDestroy(ams_data -> g1);
   if (ams_data -> r2)
      hypre_ParVectorDestroy(ams_data -> r2);
   if (ams_data -> g2)
      hypre_ParVectorDestroy(ams_data -> g2);
   if (ams_data -> zz)
      hypre_ParVectorDestroy(ams_data -> zz);

   /* NOTE(review): this destroys ams_data->A (not G0) when G0 is set —
    * presumably because, with a zero-conductivity region, setup replaces A
    * with an internally owned matrix; confirm against hypre_AMSSetup. */
   if (ams_data -> G0)
      hypre_ParCSRMatrixDestroy(ams_data -> A);
   if (ams_data -> G0)
      hypre_ParCSRMatrixDestroy(ams_data -> G0);
   if (ams_data -> A_G0)
      hypre_ParCSRMatrixDestroy(ams_data -> A_G0);
   if (ams_data -> B_G0)
      HYPRE_BoomerAMGDestroy(ams_data -> B_G0);

   hypre_SeqVectorDestroy(ams_data -> A_l1_norms);

   /* G, x, y ,z, Gx, Gy and Gz are not destroyed */

   /* note: ams_data is known non-NULL here (early return above) */
   if (ams_data)
   {
      hypre_TFree(ams_data, HYPRE_MEMORY_HOST);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetDimension
 *
 * Set problem dimension (2 or 3). By default we assume dim = 3.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetDimension(void *solver, HYPRE_Int dim)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   /* only 1, 2 or 3 dimensional problems are supported */
   if (dim != 1 && dim != 2 && dim != 3)
      hypre_error_in_arg(2);

   ams_data -> dim = dim;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetDiscreteGradient
 *
 * Set the discrete gradient matrix G.
 * This function should be called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetDiscreteGradient(void *solver, hypre_ParCSRMatrix *G)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   /* G is borrowed, not owned (see hypre_AMSDestroy) */
   ams_data -> G = G;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetCoordinateVectors
 *
 * Set the x, y and z coordinates of the vertices in the mesh.
 *
 * Either SetCoordinateVectors or SetEdgeConstantVectors should be
 * called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetCoordinateVectors(void *solver, hypre_ParVector *x,
                                        hypre_ParVector *y, hypre_ParVector *z)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   /* borrowed references; not destroyed by hypre_AMSDestroy */
   ams_data -> x = x;
   ams_data -> y = y;
   ams_data -> z = z;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetEdgeConstantVectors
 *
 * Set the vectors Gx, Gy and Gz which give the representations of
 * the constant vector fields (1,0,0), (0,1,0) and (0,0,1) in the
 * edge element basis.
 *
 * Either SetCoordinateVectors or SetEdgeConstantVectors should be
 * called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetEdgeConstantVectors(void *solver,
                                          hypre_ParVector *Gx,
                                          hypre_ParVector *Gy,
                                          hypre_ParVector *Gz)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   /* borrowed references; not destroyed by hypre_AMSDestroy */
   ams_data -> Gx = Gx;
   ams_data -> Gy = Gy;
   ams_data -> Gz = Gz;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetInterpolations
 *
 * Set the (components of) the Nedelec interpolation matrix Pi=[Pix,Piy,Piz].
 *
 * This function is generally intended to be used only for high-order Nedelec
 * discretizations (in the lowest order case, Pi is constructed internally in
 * AMS from the discrete gradient matrix and the coordinates of the vertices),
 * though it can also be used in the lowest-order case or for other types of
 * discretizations (e.g. ones based on the second family of Nedelec elements).
 *
 * By definition, Pi is the matrix representation of the linear operator that
 * interpolates (high-order) vector nodal finite elements into the (high-order)
 * Nedelec space. The component matrices are defined as Pix phi = Pi (phi,0,0)
 * and similarly for Piy and Piz. Note that all these operators depend on the
 * choice of the basis and degrees of freedom in the high-order spaces.
 *
 * The column numbering of Pi should be node-based, i.e. the x/y/z components of
 * the first node (vertex or high-order dof) should be listed first, followed by
 * the x/y/z components of the second node and so on (see the documentation of
 * HYPRE_BoomerAMGSetDofFunc).
 *
 * If used, this function should be called before hypre_AMSSetup() and there is
 * no need to provide the vertex coordinates. Furthermore, only one of the sets
 * {Pi} and {Pix,Piy,Piz} needs to be specified (though it is OK to provide
 * both). If Pix is NULL, then scalar Pi-based AMS cycles, i.e. those with
 * cycle_type > 10, will be unavailable. Similarly, AMS cycles based on
 * monolithic Pi (cycle_type < 10) require that Pi is not NULL.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetInterpolations(void *solver,
                                     hypre_ParCSRMatrix *Pi,
                                     hypre_ParCSRMatrix *Pix,
                                     hypre_ParCSRMatrix *Piy,
                                     hypre_ParCSRMatrix *Piz)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   ams_data -> Pi = Pi;
   ams_data -> Pix = Pix;
   ams_data -> Piy = Piy;
   ams_data -> Piz = Piz;

   /* user-supplied interpolations are NOT owned (not destroyed by AMS) */
   ams_data -> owns_Pi = 0;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaPoissonMatrix
 *
 * Set the matrix corresponding to the Poisson problem with coefficient
 * alpha (the curl-curl term coefficient in the Maxwell problem).
 *
 * If this function is called, the coarse space solver on the range
 * of Pi^T is a block-diagonal version of A_Pi. If this function is not
 * called, the coarse space solver on the range of Pi^T is constructed
 * as Pi^T A Pi in hypre_AMSSetup().
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetAlphaPoissonMatrix(void *solver,
                                         hypre_ParCSRMatrix *A_Pi)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   ams_data -> A_Pi = A_Pi;

   /* Penalize the eliminated degrees of freedom.
      NOTE: this modifies the caller's matrix in place. */
   hypre_ParCSRMatrixSetDiagRows(A_Pi, HYPRE_REAL_MAX);

   /* Make sure that the first entry in each row is the diagonal one. */
   /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_Pi)); */

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaPoissonMatrix
 *
 * Set the matrix corresponding to the Poisson problem with coefficient
 * beta (the mass term coefficient in the Maxwell problem).
 *
 * This function call is optional - if not given, the Poisson matrix will
 * be computed in hypre_AMSSetup(). If the given matrix is NULL, we assume
 * that beta is 0 and use two-level (instead of three-level) methods.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetBetaPoissonMatrix(void *solver,
                                        hypre_ParCSRMatrix *A_G)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   ams_data -> A_G = A_G;
   if (!A_G)
      ams_data -> beta_is_zero = 1;      /* NULL means beta == 0: two-level method */
   else
   {
      /* Penalize the eliminated degrees of freedom.
         NOTE: this modifies the caller's matrix in place. */
      hypre_ParCSRMatrixSetDiagRows(A_G, HYPRE_REAL_MAX);

      /* Make sure that the first entry in each row is the diagonal one. */
      /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_G)); */
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetInteriorNodes
 *
 * Set the list of nodes which are interior to the zero-conductivity region.
 * A node is interior if interior_nodes[i] == 1.0.
 *
 * Should be called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetInteriorNodes(void *solver, hypre_ParVector *interior_nodes) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> interior_nodes = interior_nodes; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetProjectionFrequency * * How often to project the r.h.s. onto the compatible sub-space Ker(G0^T), * when iterating with the solver. * * The default value is every 5th iteration. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetProjectionFrequency(void *solver, HYPRE_Int projection_frequency) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> projection_frequency = projection_frequency; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetMaxIter * * Set the maximum number of iterations in the three-level method. * The default value is 20. To use the AMS solver as a preconditioner, * set maxit to 1, tol to 0.0 and print_level to 0. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetMaxIter(void *solver, HYPRE_Int maxit) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> maxit = maxit; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetTol * * Set the convergence tolerance (if the method is used as a solver). * The default value is 1e-6. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetTol(void *solver, HYPRE_Real tol) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> tol = tol; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetCycleType * * Choose which three-level solver to use. 
Possible values are: * * 1 = 3-level multipl. solver (01210) <-- small solution time * 2 = 3-level additive solver (0+1+2) * 3 = 3-level multipl. solver (02120) * 4 = 3-level additive solver (010+2) * 5 = 3-level multipl. solver (0102010) <-- small solution time * 6 = 3-level additive solver (1+020) * 7 = 3-level multipl. solver (0201020) <-- small number of iterations * 8 = 3-level additive solver (0(1+2)0) <-- small solution time * 9 = 3-level multipl. solver (01210) with discrete divergence * 11 = 5-level multipl. solver (013454310) <-- small solution time, memory * 12 = 5-level additive solver (0+1+3+4+5) * 13 = 5-level multipl. solver (034515430) <-- small solution time, memory * 14 = 5-level additive solver (01(3+4+5)10) * 20 = 2-level multipl. solver (0[12]0) * * 0 = a Hiptmair-like smoother (010) * * The default value is 1. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetCycleType(void *solver, HYPRE_Int cycle_type) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> cycle_type = cycle_type; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetPrintLevel * * Control how much information is printed during the solution iterations. * The defaut values is 1 (print residual norm at each step). *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetPrintLevel(void *solver, HYPRE_Int print_level) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> print_level = print_level; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetSmoothingOptions * * Set relaxation parameters for A. Default values: 2, 1, 1.0, 1.0. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetSmoothingOptions(void *solver, HYPRE_Int A_relax_type, HYPRE_Int A_relax_times, HYPRE_Real A_relax_weight, HYPRE_Real A_omega) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> A_relax_type = A_relax_type; ams_data -> A_relax_times = A_relax_times; ams_data -> A_relax_weight = A_relax_weight; ams_data -> A_omega = A_omega; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetChebySmoothingOptions * AB: note: this could be added to the above, * but I didn't want to change parameter list) * Set parameters for chebyshev smoother for A. Default values: 2,.3. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetChebySmoothingOptions(void *solver, HYPRE_Int A_cheby_order, HYPRE_Int A_cheby_fraction) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> A_cheby_order = A_cheby_order; ams_data -> A_cheby_fraction = A_cheby_fraction; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetAlphaAMGOptions * * Set AMG parameters for B_Pi. Default values: 10, 1, 3, 0.25, 0, 0. 
*--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetAlphaAMGOptions(void *solver,
                                      HYPRE_Int B_Pi_coarsen_type,
                                      HYPRE_Int B_Pi_agg_levels,
                                      HYPRE_Int B_Pi_relax_type,
                                      HYPRE_Real B_Pi_theta,
                                      HYPRE_Int B_Pi_interp_type,
                                      HYPRE_Int B_Pi_Pmax)
{
   /* Copy the BoomerAMG options used for the alpha (B_Pi) subsolver. */
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->B_Pi_coarsen_type = B_Pi_coarsen_type;
   ams->B_Pi_agg_levels   = B_Pi_agg_levels;
   ams->B_Pi_relax_type   = B_Pi_relax_type;
   ams->B_Pi_theta        = B_Pi_theta;
   ams->B_Pi_interp_type  = B_Pi_interp_type;
   ams->B_Pi_Pmax         = B_Pi_Pmax;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaAMGCoarseRelaxType
 *
 * Set the AMG coarsest level relaxation for B_Pi. Default value: 8.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetAlphaAMGCoarseRelaxType(void *solver,
                                              HYPRE_Int B_Pi_coarse_relax_type)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->B_Pi_coarse_relax_type = B_Pi_coarse_relax_type;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaAMGOptions
 *
 * Set AMG parameters for B_G. Default values: 10, 1, 3, 0.25, 0, 0.
*--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetBetaAMGOptions(void *solver,
                                     HYPRE_Int B_G_coarsen_type,
                                     HYPRE_Int B_G_agg_levels,
                                     HYPRE_Int B_G_relax_type,
                                     HYPRE_Real B_G_theta,
                                     HYPRE_Int B_G_interp_type,
                                     HYPRE_Int B_G_Pmax)
{
   /* Copy the BoomerAMG options used for the beta (B_G) subsolver. */
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->B_G_coarsen_type = B_G_coarsen_type;
   ams->B_G_agg_levels   = B_G_agg_levels;
   ams->B_G_relax_type   = B_G_relax_type;
   ams->B_G_theta        = B_G_theta;
   ams->B_G_interp_type  = B_G_interp_type;
   ams->B_G_Pmax         = B_G_Pmax;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaAMGCoarseRelaxType
 *
 * Set the AMG coarsest level relaxation for B_G. Default value: 8.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetBetaAMGCoarseRelaxType(void *solver,
                                             HYPRE_Int B_G_coarse_relax_type)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->B_G_coarse_relax_type = B_G_coarse_relax_type;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSComputePi
 *
 * Construct the Pi interpolation matrix, which maps the space of vector
 * linear finite elements to the space of edge finite elements.
 *
 * The construction is based on the fact that Pi = [Pi_x, Pi_y, Pi_z],
 * where each block has the same sparsity structure as G, and the entries
 * can be computed from the vectors Gx, Gy, Gz.
*--------------------------------------------------------------------------*/

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* Device kernel: expand the column indices of G into those of Pi.
   Each G entry j_in[i] produces dim consecutive Pi entries
   dim*j_in[i]+d, one thread per G nonzero. */
__global__ void
hypreCUDAKernel_AMSComputePi_copy1(HYPRE_Int nnz, HYPRE_Int dim,
                                   HYPRE_Int *j_in, HYPRE_Int *j_out)
{
   const HYPRE_Int i = hypre_cuda_get_grid_thread_id<1,1>();

   if (i < nnz)
   {
      const HYPRE_Int j = dim * i;

      for (HYPRE_Int d = 0; d < dim; d++)
      {
         j_out[j+d] = dim * read_only_load(&j_in[i]) + d;
      }
   }
}

/* Device kernel: fill the values of Pi, one warp per row.  For each
   nonzero j of row i the dim output entries are v * G{x,y,z}_data[i],
   where v = 0.5*|data_in[j]| (or 1.0 when data_in is NULL). */
__global__ void
hypreCUDAKernel_AMSComputePi_copy2(HYPRE_Int nrows, HYPRE_Int dim,
                                   HYPRE_Int *i_in, HYPRE_Real *data_in,
                                   HYPRE_Real *Gx_data, HYPRE_Real *Gy_data, HYPRE_Real *Gz_data,
                                   HYPRE_Real *data_out)
{
   const HYPRE_Int i = hypre_cuda_get_grid_warp_id<1,1>();

   if (i >= nrows)
   {
      return;
   }

   const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>();
   HYPRE_Int j, istart, iend;
   HYPRE_Real t, G[3], *Gdata[3];

   Gdata[0] = Gx_data;
   Gdata[1] = Gy_data;
   Gdata[2] = Gz_data;

   /* lanes 0 and 1 load the CSR row bounds [begin, end) of row i */
   if (lane_id < 2)
   {
      j = read_only_load(i_in + i + lane_id);
   }

   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);

   /* lane d (d < dim) loads the d-th nodal vector value for this row;
      it is then broadcast to every lane of the warp */
   if (lane_id < dim)
   {
      t = read_only_load(Gdata[lane_id] + i);
   }

   for (HYPRE_Int d = 0; d < dim; d++)
   {
      G[d] = __shfl_sync(HYPRE_WARP_FULL_MASK, t, d);
   }

   /* warp-cooperative sweep over the nonzeros of row i */
   for (j = istart + lane_id; j < iend; j += HYPRE_WARP_SIZE)
   {
      const HYPRE_Real v = data_in ? fabs(read_only_load(&data_in[j])) * 0.5 : 1.0;
      const HYPRE_Int k = j * dim;

      for (HYPRE_Int d = 0; d < dim; d++)
      {
         data_out[k+d] = v * G[d];
      }
   }
}
#endif

/* Build Pi = [Pi_x, Pi_y, Pi_z]: same sparsity as G, with each G nonzero
   expanded into dim value entries 0.5*|G_ij|*G{x,y,z}_data[i].
   A is unused here; dim selects how many coordinate blocks are built.
   The result is returned through Pi_ptr. */
HYPRE_Int hypre_AMSComputePi(hypre_ParCSRMatrix *A,
                             hypre_ParCSRMatrix *G,
                             hypre_ParVector *Gx,
                             hypre_ParVector *Gy,
                             hypre_ParVector *Gz,
                             HYPRE_Int dim,
                             hypre_ParCSRMatrix **Pi_ptr)
{
   hypre_ParCSRMatrix *Pi;

   /* Compute Pi = [Pi_x, Pi_y, Pi_z] */
   {
      HYPRE_Int i, j, d;

      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      /* Pi has the row distribution of G and dim times its columns/nnz. */
      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts;
      HYPRE_Int col_starts_size;
      HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
      HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);
      col_starts_size = 2;
      /* Pi owns its scaled column partitioning (freed with Pi below). */
      col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < col_starts_size; i++)
         col_starts[i] = (HYPRE_BigInt)dim * col_starts_G[i];

      Pi = hypre_ParCSRMatrixCreate(comm,
                                    global_num_rows,
                                    global_num_cols,
                                    row_starts,
                                    col_starts,
                                    num_cols_offd,
                                    num_nonzeros_diag,
                                    num_nonzeros_offd);

      hypre_ParCSRMatrixOwnsData(Pi) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Pi) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Pi) = 1;

      hypre_ParCSRMatrixInitialize(Pi);

      /* Gy_data/Gz_data are only set (and only used) for dim >= 2 / == 3. */
      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      if (dim >= 2)
         Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      if (dim == 3)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(G), hypre_ParCSRMatrixMemoryLocation(Pi) );
#endif

      /* Fill-in the diagonal part */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pi_diag = hypre_ParCSRMatrixDiag(Pi);
         HYPRE_Int *Pi_diag_I = hypre_CSRMatrixI(Pi_diag);
         HYPRE_Int *Pi_diag_J = hypre_CSRMatrixJ(Pi_diag);
         HYPRE_Real *Pi_diag_data = hypre_CSRMatrixData(Pi_diag);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            /* Row pointers are scaled by dim; J and data are expanded by
               the two kernels above. */
            HYPRE_THRUST_CALL( transform, G_diag_I, G_diag_I + G_diag_nrows + 1, Pi_diag_I, dim * _1 );
            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nnz, "thread", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy1, gDim, bDim,
                               G_diag_nnz, dim, G_diag_J, Pi_diag_J );
            gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy2, gDim, bDim,
                               G_diag_nrows, dim, G_diag_I, G_diag_data,
                               Gx_data, Gy_data, Gz_data, Pi_diag_data );
         }
         else
#endif
         {
            for (i = 0; i < G_diag_nrows+1; i++)
               Pi_diag_I[i] = dim * G_diag_I[i];

            for (i = 0; i < G_diag_nnz; i++)
               for (d = 0; d < dim; d++)
                  Pi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;

            /* Pi_diag_data is bumped sequentially: entries of each row are
               written in x, y, z order, matching the dim*I layout above. */
            for (i = 0; i < G_diag_nrows; i++)
               for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
               {
                  *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
                  if (dim >= 2)
                     *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
                  if (dim == 3)
                     *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
               }
         }
      }

      /* Fill-in the off-diagonal part */
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pi_offd = hypre_ParCSRMatrixOffd(Pi);
         HYPRE_Int *Pi_offd_I = hypre_CSRMatrixI(Pi_offd);
         HYPRE_Int *Pi_offd_J = hypre_CSRMatrixJ(Pi_offd);
         HYPRE_Real *Pi_offd_data = hypre_CSRMatrixData(Pi_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pi_cmap = hypre_ParCSRMatrixColMapOffd(Pi);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            /* Row pointers are copied only when there are offd columns,
               mirroring the host branch below. */
            if (G_offd_ncols)
            {
               HYPRE_THRUST_CALL( transform, G_offd_I, G_offd_I + G_offd_nrows + 1, Pi_offd_I, dim * _1 );
            }
            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nnz, "thread", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy1, gDim, bDim,
                               G_offd_nnz, dim, G_offd_J, Pi_offd_J );
            gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy2, gDim, bDim,
                               G_offd_nrows, dim, G_offd_I, G_offd_data,
                               Gx_data, Gy_data, Gz_data, Pi_offd_data );
         }
         else
#endif
         {
            if (G_offd_ncols)
               for (i = 0; i < G_offd_nrows+1; i++)
                  Pi_offd_I[i] = dim * G_offd_I[i];

            for (i = 0; i < G_offd_nnz; i++)
               for (d = 0; d < dim; d++)
                  Pi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;

            for (i = 0; i < G_offd_nrows; i++)
               for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
               {
                  *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
                  if (dim >= 2)
                     *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
                  if (dim == 3)
                     *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
               }
         }

         /* Expand the off-diagonal column map by dim as well. */
         for (i = 0; i < G_offd_ncols; i++)
            for (d = 0; d < dim; d++)
               Pi_cmap[dim*i+d] = (HYPRE_BigInt)dim * G_cmap[i] + (HYPRE_BigInt)d;
      }
   }

   *Pi_ptr = Pi;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSComputePixyz
 *
 * Construct the components Pix, Piy, Piz of the interpolation matrix Pi,
 * which maps the space of vector linear finite elements to the space of
 *
 * edge finite elements.
 *
 * The construction is based on the fact that each component has the same
 * sparsity structure as G, and the entries can be computed from the vectors
 * Gx, Gy, Gz.
 *--------------------------------------------------------------------------*/

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* Device kernel: fill the values of up to three component matrices
   Pix/Piy/Piz, one warp per row.  Each nonzero j of row i receives
   v * G{x,y,z}_data[i] in the corresponding output array, where
   v = 0.5*|data_in[j]| (or 1.0 when data_in is NULL).  Unused output
   pointers (dim < 3) are passed as NULL and never touched. */
__global__ void
hypreCUDAKernel_AMSComputePixyz_copy(HYPRE_Int nrows, HYPRE_Int dim,
                                     HYPRE_Int *i_in, HYPRE_Real *data_in,
                                     HYPRE_Real *Gx_data, HYPRE_Real *Gy_data, HYPRE_Real *Gz_data,
                                     HYPRE_Real *data_x_out, HYPRE_Real *data_y_out, HYPRE_Real *data_z_out )
{
   const HYPRE_Int i = hypre_cuda_get_grid_warp_id<1,1>();

   if (i >= nrows)
   {
      return;
   }

   const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>();
   HYPRE_Int j, istart, iend;
   HYPRE_Real t, G[3], *Gdata[3], *Odata[3];

   Gdata[0] = Gx_data;
   Gdata[1] = Gy_data;
   Gdata[2] = Gz_data;
   Odata[0] = data_x_out;
   Odata[1] = data_y_out;
   Odata[2] = data_z_out;

   /* lanes 0 and 1 load the CSR row bounds [begin, end) of row i */
   if (lane_id < 2)
   {
      j = read_only_load(i_in + i + lane_id);
   }

   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);

   /* lane d (d < dim) loads the d-th nodal vector value, broadcast below */
   if (lane_id < dim)
   {
      t = read_only_load(Gdata[lane_id] + i);
   }

   for (HYPRE_Int d = 0; d < dim; d++)
   {
      G[d] = __shfl_sync(HYPRE_WARP_FULL_MASK, t, d);
   }

   for (j = istart + lane_id; j < iend; j += HYPRE_WARP_SIZE)
   {
      const HYPRE_Real v = data_in ? fabs(read_only_load(&data_in[j])) * 0.5 : 1.0;

      for (HYPRE_Int d = 0; d < dim; d++)
      {
         Odata[d][j] = v * G[d];
      }
   }
}
#endif

/* Build the separate components Pix, Piy, Piz of Pi.  Each component has
   exactly the sparsity of G, with values 0.5*|G_ij|*G{x,y,z}_data[i].
   Only the first dim output pointers are written; A is unused here. */
HYPRE_Int hypre_AMSComputePixyz(hypre_ParCSRMatrix *A,
                                hypre_ParCSRMatrix *G,
                                hypre_ParVector *Gx,
                                hypre_ParVector *Gy,
                                hypre_ParVector *Gz,
                                HYPRE_Int dim,
                                hypre_ParCSRMatrix **Pix_ptr,
                                hypre_ParCSRMatrix **Piy_ptr,
                                hypre_ParCSRMatrix **Piz_ptr)
{
   hypre_ParCSRMatrix *Pix, *Piy, *Piz;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(G) );
#endif

   /* Compute Pix, Piy, Piz */
   {
      HYPRE_Int i, j;

      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      /* Each component reuses G's distribution and sizes verbatim. */
      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(G);
      HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));

      Pix = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);
      hypre_ParCSRMatrixOwnsData(Pix) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Pix) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Pix) = 0;
      hypre_ParCSRMatrixInitialize(Pix);

      /* Piy/Piz are created only when the dimension requires them. */
      if (dim >= 2)
      {
         Piy = hypre_ParCSRMatrixCreate(comm,
                                        global_num_rows,
                                        global_num_cols,
                                        row_starts,
                                        col_starts,
                                        num_cols_offd,
                                        num_nonzeros_diag,
                                        num_nonzeros_offd);
         hypre_ParCSRMatrixOwnsData(Piy) = 1;
         hypre_ParCSRMatrixOwnsRowStarts(Piy) = 0;
         hypre_ParCSRMatrixOwnsColStarts(Piy) = 0;
         hypre_ParCSRMatrixInitialize(Piy);
      }

      if (dim == 3)
      {
         Piz = hypre_ParCSRMatrixCreate(comm,
                                        global_num_rows,
                                        global_num_cols,
                                        row_starts,
                                        col_starts,
                                        num_cols_offd,
                                        num_nonzeros_diag,
                                        num_nonzeros_offd);
         hypre_ParCSRMatrixOwnsData(Piz) = 1;
         hypre_ParCSRMatrixOwnsRowStarts(Piz) = 0;
         hypre_ParCSRMatrixOwnsColStarts(Piz) = 0;
         hypre_ParCSRMatrixInitialize(Piz);
      }

      /* Gy_data/Gz_data are only set (and only used) for dim >= 2 / == 3. */
      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      if (dim >= 2)
         Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      if (dim == 3)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

      /* Fill-in the diagonal part */
      if (dim == 3)
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

         hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
         HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
         HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
         HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);

         hypre_CSRMatrix *Piz_diag = hypre_ParCSRMatrixDiag(Piz);
         HYPRE_Int *Piz_diag_I = hypre_CSRMatrixI(Piz_diag);
         HYPRE_Int *Piz_diag_J = hypre_CSRMatrixJ(Piz_diag);
         HYPRE_Real *Piz_diag_data = hypre_CSRMatrixData(Piz_diag);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            /* I and J arrays are identical for all three components:
               copy them in one fused pass via zip iterators. */
            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_diag_I, G_diag_I, G_diag_I)),
                               G_diag_nrows + 1,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_diag_I, Piy_diag_I, Piz_diag_I)) );
            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_diag_J, G_diag_J, G_diag_J)),
                               G_diag_nnz,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_diag_J, Piy_diag_J, Piz_diag_J)) );
            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_diag_nrows, dim, G_diag_I, G_diag_data,
                               Gx_data, Gy_data, Gz_data,
                               Pix_diag_data, Piy_diag_data, Piz_diag_data );
         }
         else
#endif
         {
            for (i = 0; i < G_diag_nrows+1; i++)
            {
               Pix_diag_I[i] = G_diag_I[i];
               Piy_diag_I[i] = G_diag_I[i];
               Piz_diag_I[i] = G_diag_I[i];
            }

            for (i = 0; i < G_diag_nnz; i++)
            {
               Pix_diag_J[i] = G_diag_J[i];
               Piy_diag_J[i] = G_diag_J[i];
               Piz_diag_J[i] = G_diag_J[i];
            }

            /* The three data pointers are bumped in lock-step. */
            for (i = 0; i < G_diag_nrows; i++)
               for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
               {
                  *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
                  *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
                  *Piz_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
               }
         }
      }
      else if (dim == 2)
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

         hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
         HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
         HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
         HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_diag_I, G_diag_I)),
                               G_diag_nrows + 1,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_diag_I, Piy_diag_I)) );
            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_diag_J, G_diag_J)),
                               G_diag_nnz,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_diag_J, Piy_diag_J)) );
            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_diag_nrows, dim, G_diag_I, G_diag_data,
                               Gx_data, Gy_data, NULL,
                               Pix_diag_data, Piy_diag_data, NULL );
         }
         else
#endif
         {
            for (i = 0; i < G_diag_nrows+1; i++)
            {
               Pix_diag_I[i] = G_diag_I[i];
               Piy_diag_I[i] = G_diag_I[i];
            }

            for (i = 0; i < G_diag_nnz; i++)
            {
               Pix_diag_J[i] = G_diag_J[i];
               Piy_diag_J[i] = G_diag_J[i];
            }

            for (i = 0; i < G_diag_nrows; i++)
               for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
               {
                  *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
                  *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               }
         }
      }
      else
      {
         /* dim == 1: only the x component is produced. */
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            HYPRE_THRUST_CALL( copy_n, G_diag_I, G_diag_nrows + 1, Pix_diag_I );
            HYPRE_THRUST_CALL( copy_n, G_diag_J, G_diag_nnz, Pix_diag_J );
            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_diag_nrows, dim, G_diag_I, G_diag_data,
                               Gx_data, NULL, NULL,
                               Pix_diag_data, NULL, NULL );
         }
         else
#endif
         {
            for (i = 0; i < G_diag_nrows+1; i++)
            {
               Pix_diag_I[i] = G_diag_I[i];
            }
            for (i = 0; i < G_diag_nnz; i++)
            {
               Pix_diag_J[i] = G_diag_J[i];
            }
            for (i = 0; i < G_diag_nrows; i++)
               for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
               {
                  *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               }
         }
      }

      /* Fill-in the off-diagonal part */
      if (dim == 3)
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
         HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
         HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
         HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);

         hypre_CSRMatrix *Piz_offd = hypre_ParCSRMatrixOffd(Piz);
         HYPRE_Int *Piz_offd_I = hypre_CSRMatrixI(Piz_offd);
         HYPRE_Int *Piz_offd_J = hypre_CSRMatrixJ(Piz_offd);
         HYPRE_Real *Piz_offd_data = hypre_CSRMatrixData(Piz_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
         HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);
         HYPRE_BigInt *Piz_cmap = hypre_ParCSRMatrixColMapOffd(Piz);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            /* Row pointers are copied only when there are offd columns,
               mirroring the host branch below. */
            if (G_offd_ncols)
            {
               HYPRE_THRUST_CALL( copy_n,
                                  thrust::make_zip_iterator(thrust::make_tuple(G_offd_I, G_offd_I, G_offd_I)),
                                  G_offd_nrows + 1,
                                  thrust::make_zip_iterator(thrust::make_tuple(Pix_offd_I, Piy_offd_I, Piz_offd_I)) );
            }
            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_offd_J, G_offd_J, G_offd_J)),
                               G_offd_nnz,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_offd_J, Piy_offd_J, Piz_offd_J)) );
            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_offd_nrows, dim, G_offd_I, G_offd_data,
                               Gx_data, Gy_data, Gz_data,
                               Pix_offd_data, Piy_offd_data, Piz_offd_data );
         }
         else
#endif
         {
            if (G_offd_ncols)
               for (i = 0; i < G_offd_nrows+1; i++)
               {
                  Pix_offd_I[i] = G_offd_I[i];
                  Piy_offd_I[i] = G_offd_I[i];
                  Piz_offd_I[i] = G_offd_I[i];
               }

            for (i = 0; i < G_offd_nnz; i++)
            {
               Pix_offd_J[i] = G_offd_J[i];
               Piy_offd_J[i] = G_offd_J[i];
               Piz_offd_J[i] = G_offd_J[i];
            }

            for (i = 0; i < G_offd_nrows; i++)
               for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
               {
                  *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
                  *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
                  *Piz_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
               }
         }

         /* All components share G's off-diagonal column map. */
         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
            Piy_cmap[i] = G_cmap[i];
            Piz_cmap[i] = G_cmap[i];
         }
      }
      else if (dim == 2)
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
         HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
         HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
         HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
         HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            if (G_offd_ncols)
            {
               HYPRE_THRUST_CALL( copy_n,
                                  thrust::make_zip_iterator(thrust::make_tuple(G_offd_I, G_offd_I)),
                                  G_offd_nrows + 1,
                                  thrust::make_zip_iterator(thrust::make_tuple(Pix_offd_I, Piy_offd_I)) );
            }
            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_offd_J, G_offd_J)),
                               G_offd_nnz,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_offd_J, Piy_offd_J)) );
            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_offd_nrows, dim, G_offd_I, G_offd_data,
                               Gx_data, Gy_data, NULL,
                               Pix_offd_data, Piy_offd_data, NULL );
         }
         else
#endif
         {
            if (G_offd_ncols)
               for (i = 0; i < G_offd_nrows+1; i++)
               {
                  Pix_offd_I[i] = G_offd_I[i];
                  Piy_offd_I[i] = G_offd_I[i];
               }

            for (i = 0; i < G_offd_nnz; i++)
            {
               Pix_offd_J[i] = G_offd_J[i];
               Piy_offd_J[i] = G_offd_J[i];
            }

            for (i = 0; i < G_offd_nrows; i++)
               for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
               {
                  *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
                  *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
               }
         }

         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
            Piy_cmap[i] = G_cmap[i];
         }
      }
      else
      {
         /* dim == 1: only the x component is produced. */
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            if (G_offd_ncols)
            {
               HYPRE_THRUST_CALL( copy_n, G_offd_I, G_offd_nrows + 1, Pix_offd_I );
            }
            HYPRE_THRUST_CALL( copy_n, G_offd_J, G_offd_nnz, Pix_offd_J );
            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_offd_nrows, dim, G_offd_I, G_offd_data,
                               Gx_data, NULL, NULL,
                               Pix_offd_data, NULL, NULL );
         }
         else
#endif
         {
            if (G_offd_ncols)
               for (i = 0; i < G_offd_nrows+1; i++)
               {
                  Pix_offd_I[i] = G_offd_I[i];
               }
            for (i = 0; i < G_offd_nnz; i++)
            {
               Pix_offd_J[i] = G_offd_J[i];
            }
            for (i = 0; i < G_offd_nrows; i++)
               for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
               {
                  *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               }
         }

         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
         }
      }
   }

   /* Hand only the components requested by dim back to the caller. */
   *Pix_ptr = Pix;
   if (dim >= 2)
   {
      *Piy_ptr = Piy;
   }
   if (dim == 3)
   {
      *Piz_ptr = Piz;
   }

   return hypre_error_flag;
}

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* Device kernel: fill the values of GPi = [G, Pi_x, Pi_y, Pi_z], one warp
   per row.  Here dim counts G itself plus dim-1 coordinate blocks: entry
   k = j*dim holds the raw G value u, followed by 0.5*|u|*G{x,y,z}_data[i]. */
__global__ void
hypreCUDAKernel_AMSComputeGPi_copy2(HYPRE_Int nrows, HYPRE_Int dim,
                                    HYPRE_Int *i_in, HYPRE_Real *data_in,
                                    HYPRE_Real *Gx_data, HYPRE_Real *Gy_data, HYPRE_Real *Gz_data,
                                    HYPRE_Real *data_out)
{
   const HYPRE_Int i = hypre_cuda_get_grid_warp_id<1,1>();

   if (i >= nrows)
   {
      return;
   }

   const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>();
   HYPRE_Int j, istart, iend;
   HYPRE_Real t, G[3], *Gdata[3];

   Gdata[0] = Gx_data;
   Gdata[1] = Gy_data;
   Gdata[2] = Gz_data;

   /* lanes 0 and 1 load the CSR row bounds [begin, end) of row i */
   if (lane_id < 2)
   {
      j = read_only_load(i_in + i + lane_id);
   }

   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);

   /* only dim-1 nodal vectors exist (slot 0 of the output is G itself) */
   if (lane_id < dim - 1)
   {
      t = read_only_load(Gdata[lane_id] + i);
   }

   for (HYPRE_Int d = 0; d < dim - 1; d++)
   {
      G[d] = __shfl_sync(HYPRE_WARP_FULL_MASK, t, d);
   }

   for (j = istart + lane_id; j < iend; j += HYPRE_WARP_SIZE)
   {
      const HYPRE_Real u =
read_only_load(&data_in[j]); const HYPRE_Real v = fabs(u) * 0.5; const HYPRE_Int k = j * dim; data_out[k] = u; for (HYPRE_Int d = 0; d < dim - 1; d++) { data_out[k+d+1] = v * G[d]; } } } #endif /*-------------------------------------------------------------------------- * hypre_AMSComputeGPi * * Construct the matrix [G,Pi] which can be considered an interpolation * matrix from S_h^4 (4 copies of the scalar linear finite element space) * to the edge finite elements space. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSComputeGPi(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *G, hypre_ParVector *Gx, hypre_ParVector *Gy, hypre_ParVector *Gz, HYPRE_Int dim, hypre_ParCSRMatrix **GPi_ptr) { hypre_ParCSRMatrix *GPi; /* Take into account G */ dim++; /* Compute GPi = [Pi_x, Pi_y, Pi_z, G] */ { HYPRE_Int i, j, d; HYPRE_Real *Gx_data, *Gy_data, *Gz_data; MPI_Comm comm = hypre_ParCSRMatrixComm(G); HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G); HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G); HYPRE_BigInt *col_starts; HYPRE_Int col_starts_size; HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G)); HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G)); HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G)); HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G); col_starts_size = 2; col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST); for (i = 0; i < col_starts_size; i++) col_starts[i] = (HYPRE_BigInt) dim * col_starts_G[i]; GPi = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); hypre_ParCSRMatrixOwnsData(GPi) = 1; hypre_ParCSRMatrixOwnsRowStarts(GPi) = 0; hypre_ParCSRMatrixOwnsColStarts(GPi) = 1; 
hypre_ParCSRMatrixInitialize(GPi); Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx)); if (dim >= 3) Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy)); if (dim == 4) Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz)); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(G), hypre_ParCSRMatrixMemoryLocation(GPi) ); #endif /* Fill-in the diagonal part */ { hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G); HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag); HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag); HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag); HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag); HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag); hypre_CSRMatrix *GPi_diag = hypre_ParCSRMatrixDiag(GPi); HYPRE_Int *GPi_diag_I = hypre_CSRMatrixI(GPi_diag); HYPRE_Int *GPi_diag_J = hypre_CSRMatrixJ(GPi_diag); HYPRE_Real *GPi_diag_data = hypre_CSRMatrixData(GPi_diag); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { HYPRE_THRUST_CALL( transform, G_diag_I, G_diag_I + G_diag_nrows + 1, GPi_diag_I, dim * _1 ); dim3 bDim = hypre_GetDefaultCUDABlockDimension(); dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nnz, "thread", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy1, gDim, bDim, G_diag_nnz, dim, G_diag_J, GPi_diag_J ); gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nrows, "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputeGPi_copy2, gDim, bDim, G_diag_nrows, dim, G_diag_I, G_diag_data, Gx_data, Gy_data, Gz_data, GPi_diag_data ); } else #endif { for (i = 0; i < G_diag_nrows+1; i++) GPi_diag_I[i] = dim * G_diag_I[i]; for (i = 0; i < G_diag_nnz; i++) for (d = 0; d < dim; d++) GPi_diag_J[dim*i+d] = dim*G_diag_J[i]+d; for (i = 0; i < G_diag_nrows; i++) for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++) { *GPi_diag_data++ = G_diag_data[j]; *GPi_diag_data++ = fabs(G_diag_data[j]) * 
0.5 * Gx_data[i]; if (dim >= 3) *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i]; if (dim == 4) *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i]; } } } /* Fill-in the off-diagonal part */ { hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G); HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd); HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd); HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd); HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd); HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd); HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd); hypre_CSRMatrix *GPi_offd = hypre_ParCSRMatrixOffd(GPi); HYPRE_Int *GPi_offd_I = hypre_CSRMatrixI(GPi_offd); HYPRE_Int *GPi_offd_J = hypre_CSRMatrixJ(GPi_offd); HYPRE_Real *GPi_offd_data = hypre_CSRMatrixData(GPi_offd); HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G); HYPRE_BigInt *GPi_cmap = hypre_ParCSRMatrixColMapOffd(GPi); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { if (G_offd_ncols) { HYPRE_THRUST_CALL( transform, G_offd_I, G_offd_I + G_offd_nrows + 1, GPi_offd_I, dim * _1 ); } dim3 bDim = hypre_GetDefaultCUDABlockDimension(); dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nnz, "thread", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy1, gDim, bDim, G_offd_nnz, dim, G_offd_J, GPi_offd_J ); gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nrows, "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputeGPi_copy2, gDim, bDim, G_offd_nrows, dim, G_offd_I, G_offd_data, Gx_data, Gy_data, Gz_data, GPi_offd_data ); } else #endif { if (G_offd_ncols) for (i = 0; i < G_offd_nrows+1; i++) GPi_offd_I[i] = dim * G_offd_I[i]; for (i = 0; i < G_offd_nnz; i++) for (d = 0; d < dim; d++) GPi_offd_J[dim*i+d] = dim*G_offd_J[i]+d; for (i = 0; i < G_offd_nrows; i++) for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++) { *GPi_offd_data++ = G_offd_data[j]; *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i]; if (dim >= 3) 
*GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i]; if (dim == 4) *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i]; } } for (i = 0; i < G_offd_ncols; i++) for (d = 0; d < dim; d++) GPi_cmap[dim*i+d] = dim*G_cmap[i]+d; } } *GPi_ptr = GPi; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetup * * Construct the AMS solver components. * * The following functions need to be called before hypre_AMSSetup(): * - hypre_AMSSetDimension() (if solving a 2D problem) * - hypre_AMSSetDiscreteGradient() * - hypre_AMSSetCoordinateVectors() or hypre_AMSSetEdgeConstantVectors *--------------------------------------------------------------------------*/ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) __global__ void hypreCUDAKernel_FixInterNodes( HYPRE_Int nrows, HYPRE_Int *G0t_diag_i, HYPRE_Complex *G0t_diag_data, HYPRE_Int *G0t_offd_i, HYPRE_Complex *G0t_offd_data, HYPRE_Real *interior_nodes_data) { HYPRE_Int row_i = hypre_cuda_get_grid_warp_id<1,1>(); if (row_i >= nrows) { return; } HYPRE_Int lane = hypre_cuda_get_lane_id<1>(); HYPRE_Int not1 = 0; if (lane == 0) { not1 = read_only_load(&interior_nodes_data[row_i]) != 1.0; } not1 = __shfl_sync(HYPRE_WARP_FULL_MASK, not1, 0); if (!not1) { return; } HYPRE_Int p1, q1, p2 = 0, q2 = 0; bool nonempty_offd = G0t_offd_data != NULL; if (lane < 2) { p1 = read_only_load(G0t_diag_i + row_i + lane); if (nonempty_offd) { p2 = read_only_load(G0t_offd_i + row_i + lane); } } q1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 1); p1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 0); if (nonempty_offd) { q2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 1); p2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 0); } for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE) { G0t_diag_data[j] = 0.0; } for (HYPRE_Int j = p2 + lane; j < q2; j += HYPRE_WARP_SIZE) { G0t_offd_data[j] = 0.0; } } __global__ void hypreCUDAKernel_AMSSetupScaleGGt( HYPRE_Int Gt_num_rows, HYPRE_Int *Gt_diag_i, 
HYPRE_Int *Gt_diag_j, HYPRE_Real *Gt_diag_data, HYPRE_Int *Gt_offd_i, HYPRE_Real *Gt_offd_data, HYPRE_Real *Gx_data, HYPRE_Real *Gy_data, HYPRE_Real *Gz_data ) { HYPRE_Int row_i = hypre_cuda_get_grid_warp_id<1,1>(); if (row_i >= Gt_num_rows) { return; } HYPRE_Int lane = hypre_cuda_get_lane_id<1>(); HYPRE_Real h2 = 0.0; HYPRE_Int ne, p1, q1, p2 = 0, q2 = 0; if (lane < 2) { p1 = read_only_load(Gt_diag_i + row_i + lane); } q1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 1); p1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 0); ne = q1 - p1; if (ne == 0) { return; } if (Gt_offd_data != NULL) { if (lane < 2) { p2 = read_only_load(Gt_offd_i + row_i + lane); } q2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 1); p2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 0); } for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE) { const HYPRE_Int k = read_only_load(&Gt_diag_j[j]); const HYPRE_Real Gx = read_only_load(&Gx_data[k]); const HYPRE_Real Gy = read_only_load(&Gy_data[k]); const HYPRE_Real Gz = read_only_load(&Gz_data[k]); h2 += Gx*Gx + Gy*Gy + Gz*Gz; } h2 = warp_allreduce_sum(h2) / ne; for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE) { Gt_diag_data[j] *= h2; } for (HYPRE_Int j = p2 + lane; j < q2; j += HYPRE_WARP_SIZE) { Gt_offd_data[j] *= h2; } } #endif HYPRE_Int hypre_AMSSetup(void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *b, hypre_ParVector *x) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ); #endif hypre_AMSData *ams_data = (hypre_AMSData *) solver; HYPRE_Int input_info = 0; ams_data -> A = A; /* Modifications for problems with zero-conductivity regions */ if (ams_data -> interior_nodes) { hypre_ParCSRMatrix *G0t, *Aorig = A; /* Make sure that multiple Setup()+Solve() give identical results */ ams_data -> solve_counter = 0; /* Construct the discrete gradient matrix for the zero-conductivity region by eliminating the zero-conductivity nodes from G^t. 
The range of G0 represents the kernel of A, i.e. the gradients of nodal basis functions supported in zero-conductivity regions. */ hypre_ParCSRMatrixTranspose(ams_data -> G, &G0t, 1); { HYPRE_Int i, j; HYPRE_Int nv = hypre_ParCSRMatrixNumCols(ams_data -> G); hypre_CSRMatrix *G0td = hypre_ParCSRMatrixDiag(G0t); HYPRE_Int *G0tdI = hypre_CSRMatrixI(G0td); HYPRE_Real *G0tdA = hypre_CSRMatrixData(G0td); hypre_CSRMatrix *G0to = hypre_ParCSRMatrixOffd(G0t); HYPRE_Int *G0toI = hypre_CSRMatrixI(G0to); HYPRE_Real *G0toA = hypre_CSRMatrixData(G0to); HYPRE_Real *interior_nodes_data=hypre_VectorData( hypre_ParVectorLocalVector((hypre_ParVector*) ams_data -> interior_nodes)); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { dim3 bDim = hypre_GetDefaultCUDABlockDimension(); dim3 gDim = hypre_GetDefaultCUDAGridDimension(nv, "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_FixInterNodes, gDim, bDim, nv, G0tdI, G0tdA, G0toI, G0toA, interior_nodes_data ); } else #endif { for (i = 0; i < nv; i++) { if (interior_nodes_data[i] != 1) { for (j = G0tdI[i]; j < G0tdI[i+1]; j++) G0tdA[j] = 0.0; if (G0toI) for (j = G0toI[i]; j < G0toI[i+1]; j++) G0toA[j] = 0.0; } } } } hypre_ParCSRMatrixTranspose(G0t, & ams_data -> G0, 1); /* Construct the subspace matrix A_G0 = G0^T G0 */ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_G0 = hypre_ParCSRMatMat(G0t, ams_data -> G0); } else #endif { ams_data -> A_G0 = hypre_ParMatmul(G0t, ams_data -> G0); } hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G0); /* Create AMG solver for A_G0 */ HYPRE_BoomerAMGCreate(&ams_data -> B_G0); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G0, ams_data -> B_G_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G0, ams_data -> B_G_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G0, ams_data -> B_G_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G0, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G0, 25); 
HYPRE_BoomerAMGSetTol(ams_data -> B_G0, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G0, 3); /* use just a few V-cycles */ HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G0, ams_data -> B_G_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_G0, ams_data -> B_G_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G0, ams_data -> B_G_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G0, 2); /* don't coarsen to 0 */ /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G0, ams_data -> B_G_coarse_relax_type, 3); HYPRE_BoomerAMGSetup(ams_data -> B_G0, (HYPRE_ParCSRMatrix)ams_data -> A_G0, 0, 0); /* Construct the preconditioner for ams_data->A = A + G0 G0^T. NOTE: this can be optimized significantly by taking into account that the sparsity pattern of A is subset of the sparsity pattern of G0 G0^T */ { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_ParCSRMatrix *A; if (exec == HYPRE_EXEC_DEVICE) { A = hypre_ParCSRMatMat(ams_data -> G0, G0t); } else #endif { A = hypre_ParMatmul(ams_data -> G0, G0t); } hypre_ParCSRMatrix *B = Aorig; hypre_ParCSRMatrix **C_ptr = &ams_data -> A; hypre_ParCSRMatrix *C; HYPRE_Real factor, lfactor; /* scale (penalize) G0 G0^T before adding it to the matrix */ { HYPRE_Int i; HYPRE_Int B_num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(B)); HYPRE_Real *B_diag_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(B)); HYPRE_Real *B_offd_data = hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(B)); HYPRE_Int *B_diag_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(B)); HYPRE_Int *B_offd_i = hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(B)); lfactor = -1; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { HYPRE_Int nnz_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B)); HYPRE_Int nnz_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B)); #if defined(HYPRE_DEBUG) HYPRE_Int nnz; hypre_TMemcpy(&nnz, 
&B_diag_i[B_num_rows], HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_assert(nnz == nnz_diag); hypre_TMemcpy(&nnz, &B_offd_i[B_num_rows], HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_assert(nnz == nnz_offd); #endif if (nnz_diag) { lfactor = HYPRE_THRUST_CALL( reduce, thrust::make_transform_iterator(B_diag_data, absolute_value<HYPRE_Real>()), thrust::make_transform_iterator(B_diag_data + nnz_diag, absolute_value<HYPRE_Real>()), -1.0, thrust::maximum<HYPRE_Real>() ); } if (nnz_offd) { lfactor = HYPRE_THRUST_CALL( reduce, thrust::make_transform_iterator(B_offd_data, absolute_value<HYPRE_Real>()), thrust::make_transform_iterator(B_offd_data + nnz_offd, absolute_value<HYPRE_Real>()), lfactor, thrust::maximum<HYPRE_Real>() ); } } else #endif { for (i = 0; i < B_diag_i[B_num_rows]; i++) if (fabs(B_diag_data[i]) > lfactor) lfactor = fabs(B_diag_data[i]); for (i = 0; i < B_offd_i[B_num_rows]; i++) if (fabs(B_offd_data[i]) > lfactor) lfactor = fabs(B_offd_data[i]); } lfactor *= 1e-10; /* scaling factor: max|A_ij|*1e-10 */ hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX, hypre_ParCSRMatrixComm(A)); } hypre_ParCSRMatrixAdd(factor, A, 1.0, B, &C); /*hypre_CSRMatrix *A_local, *B_local, *C_local, *C_tmp; MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)); HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)); HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)); HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B)); HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B)); HYPRE_Int 
B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B)); A_local = hypre_MergeDiagAndOffd(A); B_local = hypre_MergeDiagAndOffd(B);*/ /* scale (penalize) G0 G0^T before adding it to the matrix */ /*{ HYPRE_Int i, nnz = hypre_CSRMatrixNumNonzeros(A_local); HYPRE_Real *data = hypre_CSRMatrixData(A_local); HYPRE_Real *dataB = hypre_CSRMatrixData(B_local); HYPRE_Int nnzB = hypre_CSRMatrixNumNonzeros(B_local); HYPRE_Real factor, lfactor; lfactor = -1; for (i = 0; i < nnzB; i++) if (fabs(dataB[i]) > lfactor) lfactor = fabs(dataB[i]); lfactor *= 1e-10; hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX, hypre_ParCSRMatrixComm(A)); for (i = 0; i < nnz; i++) data[i] *= factor; } C_tmp = hypre_CSRMatrixBigAdd(A_local, B_local); C_local = hypre_CSRMatrixBigDeleteZeros(C_tmp,0.0); if (C_local) hypre_CSRMatrixDestroy(C_tmp); else C_local = C_tmp; C = hypre_ParCSRMatrixCreate (comm, global_num_rows, global_num_cols, row_starts, col_starts, A_num_cols_offd + B_num_cols_offd, A_num_nonzeros_diag + B_num_nonzeros_diag, A_num_nonzeros_offd + B_num_nonzeros_offd); GenerateDiagAndOffd(C_local, C, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A)); hypre_ParCSRMatrixOwnsRowStarts(C) = 0; hypre_ParCSRMatrixOwnsColStarts(C) = 1; hypre_ParCSRMatrixOwnsColStarts(G0t) = 0; hypre_CSRMatrixDestroy(A_local); hypre_CSRMatrixDestroy(B_local); hypre_CSRMatrixDestroy(C_local); */ hypre_ParCSRMatrixDestroy(A); *C_ptr = C; } hypre_ParCSRMatrixDestroy(G0t); } /* Make sure that the first entry in each row is the diagonal one. 
*/
/* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(ams_data -> A)); */

/* Compute the l1 norm of the rows of A (used by the l1-scaled
   smoothers selected with A_relax_type in [1,4]) */
if (ams_data -> A_relax_type >= 1 && ams_data -> A_relax_type <= 4)
{
   HYPRE_Real *l1_norm_data = NULL;

   hypre_ParCSRComputeL1Norms(ams_data -> A, ams_data -> A_relax_type,
                              NULL, &l1_norm_data);

   /* Wrap the raw l1-norm array in a hypre_Vector owned by ams_data,
      placed in the same memory location as A */
   ams_data -> A_l1_norms = hypre_SeqVectorCreate(hypre_ParCSRMatrixNumRows(ams_data -> A));
   hypre_VectorData(ams_data -> A_l1_norms) = l1_norm_data;
   hypre_SeqVectorInitialize_v2(ams_data -> A_l1_norms,
                                hypre_ParCSRMatrixMemoryLocation(ams_data -> A));
}

/* Chebyshev? Estimate the extreme eigenvalues needed by the
   Chebyshev smoother (A_relax_type == 16) */
if (ams_data -> A_relax_type == 16)
{
   hypre_ParCSRMaxEigEstimateCG(ams_data->A, 1, 10,
                                &ams_data->A_max_eig_est,
                                &ams_data->A_min_eig_est);
}

/* If not given, compute Gx, Gy and Gz */
{
   /* input_info == 1: the vertex coordinate vectors x/y/z were given
      (y only needed when dim > 1, z only when dim > 2);
      input_info == 2: the vectors Gx/Gy/Gz were given directly */
   if (ams_data -> x != NULL &&
       (ams_data -> dim == 1 || ams_data -> y != NULL) &&
       (ams_data -> dim <= 2 || ams_data -> z != NULL))
      input_info = 1;

   if (ams_data -> Gx != NULL &&
       (ams_data -> dim == 1 || ams_data -> Gy != NULL) &&
       (ams_data -> dim <= 2 || ams_data -> Gz != NULL))
      input_info = 2;

   if (input_info == 1)
   {
      /* Gx = G x, Gy = G y, Gz = G z: apply the discrete gradient
         to each coordinate vector */
      ams_data -> Gx = hypre_ParVectorInRangeOf(ams_data -> G);
      hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> x, 0.0, ams_data -> Gx);
      if (ams_data -> dim >= 2)
      {
         ams_data -> Gy = hypre_ParVectorInRangeOf(ams_data -> G);
         hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> y, 0.0, ams_data -> Gy);
      }
      if (ams_data -> dim == 3)
      {
         ams_data -> Gz = hypre_ParVectorInRangeOf(ams_data -> G);
         hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> z, 0.0, ams_data -> Gz);
      }
   }
}

/* Build the interpolation operator(s), unless already supplied */
if (ams_data -> Pi == NULL && ams_data -> Pix == NULL)
{
   if (ams_data -> cycle_type == 20)
      /* Construct the combined interpolation matrix [G,Pi] */
      hypre_AMSComputeGPi(ams_data -> A, ams_data -> G,
                          ams_data -> Gx, ams_data -> Gy, ams_data -> Gz,
                          ams_data -> dim, &ams_data -> Pi);
   else if (ams_data -> cycle_type > 10)
      /* Construct Pi{x,y,z} instead of Pi = [Pix,Piy,Piz] */
      hypre_AMSComputePixyz(ams_data -> A, ams_data -> G,
ams_data -> Gx, ams_data -> Gy, ams_data -> Gz, ams_data -> dim, &ams_data -> Pix, &ams_data -> Piy, &ams_data -> Piz); else /* Construct the Pi interpolation matrix */ hypre_AMSComputePi(ams_data -> A, ams_data -> G, ams_data -> Gx, ams_data -> Gy, ams_data -> Gz, ams_data -> dim, &ams_data -> Pi); } /* Keep Gx, Gy and Gz only if use the method with discrete divergence stabilization (where we use them to compute the local mesh size). */ if (input_info == 1 && ams_data -> cycle_type != 9) { hypre_ParVectorDestroy(ams_data -> Gx); if (ams_data -> dim >= 2) hypre_ParVectorDestroy(ams_data -> Gy); if (ams_data -> dim == 3) hypre_ParVectorDestroy(ams_data -> Gz); } /* Create the AMG solver on the range of G^T */ if (!ams_data -> beta_is_zero && ams_data -> cycle_type != 20) { HYPRE_BoomerAMGCreate(&ams_data -> B_G); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G, ams_data -> B_G_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G, ams_data -> B_G_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G, ams_data -> B_G_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_G, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G, ams_data -> B_G_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_G, ams_data -> B_G_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G, ams_data -> B_G_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G, 2); /* don't coarsen to 0 */ /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G, ams_data -> B_G_coarse_relax_type, 3); if (ams_data -> cycle_type == 0) HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 2); /* If not given, construct the coarse space matrix by RAP */ if (!ams_data -> A_G) { HYPRE_Int G_owned_col_starts; if (!hypre_ParCSRMatrixCommPkg(ams_data -> G)) hypre_MatvecCommPkgCreate(ams_data -> 
G); if (!hypre_ParCSRMatrixCommPkg(ams_data -> A)) hypre_MatvecCommPkgCreate(ams_data -> A); G_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> G); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_G = hypre_ParCSRMatrixRAPKT(ams_data -> G, ams_data -> A, ams_data -> G, 1); } else #endif { hypre_BoomerAMGBuildCoarseOperator(ams_data -> G, ams_data -> A, ams_data -> G, &ams_data -> A_G); } /* Make sure that A_G has no zero rows (this can happen if beta is zero in part of the domain). */ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G); hypre_ParCSRMatrixOwnsColStarts(ams_data -> G) = G_owned_col_starts; hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_G) = 0; ams_data -> owns_A_G = 1; } HYPRE_BoomerAMGSetup(ams_data -> B_G, (HYPRE_ParCSRMatrix)ams_data -> A_G, 0, 0); } if (ams_data -> cycle_type > 10 && ams_data -> cycle_type != 20) /* Create the AMG solvers on the range of Pi{x,y,z}^T */ { HYPRE_Int P_owned_col_starts; HYPRE_BoomerAMGCreate(&ams_data -> B_Pix); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pix, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pix, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pix, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Pix, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pix, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pix, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pix, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pix, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pix, 2); HYPRE_BoomerAMGCreate(&ams_data -> B_Piy); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piy, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piy, ams_data -> B_Pi_agg_levels); 
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piy, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Piy, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piy, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piy, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piy, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piy, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piy, 2); HYPRE_BoomerAMGCreate(&ams_data -> B_Piz); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piz, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piz, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piz, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Piz, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piz, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piz, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piz, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piz, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piz, 2); /* Generally, don't use exact solve on the coarsest level (matrices may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_coarse_relax_type, 3); HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_coarse_relax_type, 3); HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_coarse_relax_type, 3); if (ams_data -> cycle_type == 0) { HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 2); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 2); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 2); } /* Construct the coarse space matrices by RAP */ if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pix)) 
hypre_MatvecCommPkgCreate(ams_data -> Pix); P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pix); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_Pix = hypre_ParCSRMatrixRAPKT(ams_data -> Pix, ams_data -> A, ams_data -> Pix, 1); } else #endif { hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pix, ams_data -> A, ams_data -> Pix, &ams_data -> A_Pix); } if (!P_owned_col_starts) { hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pix) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pix) = 0; } /* Make sure that A_Pix has no zero rows (this can happen for some kinds of boundary conditions with contact). */ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pix); HYPRE_BoomerAMGSetup(ams_data -> B_Pix, (HYPRE_ParCSRMatrix)ams_data -> A_Pix, 0, 0); if (ams_data -> Piy) { if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piy)) hypre_MatvecCommPkgCreate(ams_data -> Piy); P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piy); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_Piy = hypre_ParCSRMatrixRAPKT(ams_data -> Piy, ams_data -> A, ams_data -> Piy, 1); } else #endif { hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piy, ams_data -> A, ams_data -> Piy, &ams_data -> A_Piy); } if (!P_owned_col_starts) { hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piy) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piy) = 0; } /* Make sure that A_Piy has no zero rows (this can happen for some kinds of boundary conditions with contact). 
*/ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piy); HYPRE_BoomerAMGSetup(ams_data -> B_Piy, (HYPRE_ParCSRMatrix)ams_data -> A_Piy, 0, 0); } if (ams_data -> Piz) { if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piz)) hypre_MatvecCommPkgCreate(ams_data -> Piz); P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piz); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_Piz = hypre_ParCSRMatrixRAPKT(ams_data -> Piz, ams_data -> A, ams_data -> Piz, 1); } else #endif { hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piz, ams_data -> A, ams_data -> Piz, &ams_data -> A_Piz); } if (!P_owned_col_starts) { hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piz) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piz) = 0; } /* Make sure that A_Piz has no zero rows (this can happen for some kinds of boundary conditions with contact). */ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piz); HYPRE_BoomerAMGSetup(ams_data -> B_Piz, (HYPRE_ParCSRMatrix)ams_data -> A_Piz, 0, 0); } } else /* Create the AMG solver on the range of Pi^T */ { HYPRE_BoomerAMGCreate(&ams_data -> B_Pi); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pi, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pi, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pi, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Pi, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pi, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pi, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pi, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pi, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pi, 2); /* don't coarsen to 0 */ /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> 
B_Pi, ams_data -> B_Pi_coarse_relax_type, 3); if (ams_data -> cycle_type == 0) HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 2); /* If not given, construct the coarse space matrix by RAP and notify BoomerAMG that this is a dim x dim block system. */ if (!ams_data -> A_Pi) { HYPRE_Int P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pi); if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pi)) hypre_MatvecCommPkgCreate(ams_data -> Pi); if (!hypre_ParCSRMatrixCommPkg(ams_data -> A)) hypre_MatvecCommPkgCreate(ams_data -> A); if (ams_data -> cycle_type == 9) { /* Add a discrete divergence term to A before computing Pi^t A Pi */ { hypre_ParCSRMatrix *Gt, *GGt, *ApGGt; hypre_ParCSRMatrixTranspose(ams_data -> G, &Gt, 1); hypre_ParCSRMatrixOwnsColStarts(Gt) = 0; hypre_ParCSRMatrixOwnsRowStarts(Gt) = 0; /* scale GGt by h^2 */ { HYPRE_Real h2; HYPRE_Int i, j, k, ne; hypre_CSRMatrix *Gt_diag = hypre_ParCSRMatrixDiag(Gt); HYPRE_Int Gt_num_rows = hypre_CSRMatrixNumRows(Gt_diag); HYPRE_Int *Gt_diag_I = hypre_CSRMatrixI(Gt_diag); HYPRE_Int *Gt_diag_J = hypre_CSRMatrixJ(Gt_diag); HYPRE_Real *Gt_diag_data = hypre_CSRMatrixData(Gt_diag); hypre_CSRMatrix *Gt_offd = hypre_ParCSRMatrixOffd(Gt); HYPRE_Int *Gt_offd_I = hypre_CSRMatrixI(Gt_offd); HYPRE_Real *Gt_offd_data = hypre_CSRMatrixData(Gt_offd); HYPRE_Real *Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gx)); HYPRE_Real *Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gy)); HYPRE_Real *Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gz)); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { dim3 bDim = hypre_GetDefaultCUDABlockDimension(); dim3 gDim = hypre_GetDefaultCUDAGridDimension(Gt_num_rows, "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSSetupScaleGGt, gDim, bDim, Gt_num_rows, Gt_diag_I, Gt_diag_J, Gt_diag_data, Gt_offd_I, Gt_offd_data, Gx_data, Gy_data, Gz_data ); } else #endif { for (i = 0; i < Gt_num_rows; i++) { /* 
determine the characteristic mesh size for vertex i */ h2 = 0.0; ne = 0; for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++) { k = Gt_diag_J[j]; h2 += Gx_data[k]*Gx_data[k]+Gy_data[k]*Gy_data[k]+Gz_data[k]*Gz_data[k]; ne++; } if (ne != 0) { h2 /= ne; for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++) Gt_diag_data[j] *= h2; for (j = Gt_offd_I[i]; j < Gt_offd_I[i+1]; j++) Gt_offd_data[j] *= h2; } } } } /* we only needed Gx, Gy and Gz to compute the local mesh size */ if (input_info == 1) { hypre_ParVectorDestroy(ams_data -> Gx); if (ams_data -> dim >= 2) hypre_ParVectorDestroy(ams_data -> Gy); if (ams_data -> dim == 3) hypre_ParVectorDestroy(ams_data -> Gz); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { GGt = hypre_ParCSRMatMat(ams_data -> G, Gt); } #endif else { GGt = hypre_ParMatmul(ams_data -> G, Gt); } hypre_ParCSRMatrixDestroy(Gt); /* hypre_ParCSRMatrixAdd(GGt, A, &ams_data -> A); */ hypre_ParCSRMatrixAdd(1.0, GGt, 1.0, ams_data -> A, &ApGGt); /*{ hypre_ParCSRMatrix *A = GGt; hypre_ParCSRMatrix *B = ams_data -> A; hypre_ParCSRMatrix **C_ptr = &ApGGt; hypre_ParCSRMatrix *C; hypre_CSRMatrix *A_local, *B_local, *C_local; MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)); HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)); HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)); HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B)); HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B)); HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B)); A_local = 
hypre_MergeDiagAndOffd(A); B_local = hypre_MergeDiagAndOffd(B); C_local = hypre_CSRMatrixBigAdd(A_local, B_local); hypre_CSRMatrixBigJtoJ(C_local); C = hypre_ParCSRMatrixCreate (comm, global_num_rows, global_num_cols, row_starts, col_starts, A_num_cols_offd + B_num_cols_offd, A_num_nonzeros_diag + B_num_nonzeros_diag, A_num_nonzeros_offd + B_num_nonzeros_offd); GenerateDiagAndOffd(C_local, C, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A)); hypre_ParCSRMatrixOwnsRowStarts(C) = 0; hypre_ParCSRMatrixOwnsColStarts(C) = 0; hypre_CSRMatrixDestroy(A_local); hypre_CSRMatrixDestroy(B_local); hypre_CSRMatrixDestroy(C_local); *C_ptr = C; }*/ hypre_ParCSRMatrixDestroy(GGt); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_Pi = hypre_ParCSRMatrixRAPKT(ams_data -> Pi, ApGGt, ams_data -> Pi, 1); } else #endif { hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi, ApGGt, ams_data -> Pi, &ams_data -> A_Pi); } } } else { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_Pi = hypre_ParCSRMatrixRAPKT(ams_data -> Pi, ams_data -> A, ams_data -> Pi, 1); } else #endif { hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi, ams_data -> A, ams_data -> Pi, &ams_data -> A_Pi); } } if (!P_owned_col_starts) { hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pi) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pi) = 0; } ams_data -> owns_A_Pi = 1; if (ams_data -> cycle_type != 20) HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim); else HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim + 1); /* HYPRE_BoomerAMGSetNodal(ams_data -> B_Pi, 1); */ } /* Make sure that A_Pi has no zero rows (this can happen for some kinds of boundary conditions with contact). 
*/ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pi); HYPRE_BoomerAMGSetup(ams_data -> B_Pi, (HYPRE_ParCSRMatrix)ams_data -> A_Pi, 0, 0); } /* Allocate temporary vectors */ ams_data -> r0 = hypre_ParVectorInRangeOf(ams_data -> A); ams_data -> g0 = hypre_ParVectorInRangeOf(ams_data -> A); if (ams_data -> A_G) { ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_G); ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_G); } if (ams_data -> r1 == NULL && ams_data -> A_Pix) { ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix); ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix); } if (ams_data -> Pi) { ams_data -> r2 = hypre_ParVectorInDomainOf(ams_data -> Pi); ams_data -> g2 = hypre_ParVectorInDomainOf(ams_data -> Pi); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSolve * * Solve the system A x = b. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSolve(void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *b, hypre_ParVector *x) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; HYPRE_Int i, my_id = -1; HYPRE_Real r0_norm, r_norm, b_norm, relative_resid = 0, old_resid; char cycle[30]; hypre_ParCSRMatrix *Ai[5], *Pi[5]; HYPRE_Solver Bi[5]; HYPRE_PtrToSolverFcn HBi[5]; hypre_ParVector *ri[5], *gi[5]; HYPRE_Int needZ = 0; hypre_ParVector *z = ams_data -> zz; Ai[0] = ams_data -> A_G; Pi[0] = ams_data -> G; Ai[1] = ams_data -> A_Pi; Pi[1] = ams_data -> Pi; Ai[2] = ams_data -> A_Pix; Pi[2] = ams_data -> Pix; Ai[3] = ams_data -> A_Piy; Pi[3] = ams_data -> Piy; Ai[4] = ams_data -> A_Piz; Pi[4] = ams_data -> Piz; Bi[0] = ams_data -> B_G; HBi[0] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve; Bi[1] = ams_data -> B_Pi; HBi[1] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGBlockSolve; Bi[2] = ams_data -> B_Pix; HBi[2] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve; Bi[3] = ams_data -> B_Piy; HBi[3] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
/* NOTE(review): Ai[k]/Pi[k] pair each auxiliary-space coarse matrix with its interpolation, and Bi[k]/HBi[k] pair it with an AMG solver plus the matching solve entry point; only index 1 (the full nodal Pi space) uses the block variant hypre_BoomerAMGBlockSolve, as set up above with SetNumFunctions. */
Bi[4] = ams_data -> B_Piz; HBi[4] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve; ri[0] = ams_data -> r1; gi[0] = ams_data -> g1; ri[1] = ams_data -> r2; gi[1] = ams_data -> g2; ri[2] = ams_data -> r1; gi[2] = ams_data -> g1; ri[3] = ams_data -> r1; gi[3] = ams_data -> g1; ri[4] = ams_data -> r1; gi[4] = ams_data -> g1; /* may need to create an additional temporary vector for relaxation */ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ); if (exec == HYPRE_EXEC_DEVICE) { needZ = ams_data -> A_relax_type == 2 || ams_data -> A_relax_type == 4 || ams_data -> A_relax_type == 16; } else #endif { needZ = hypre_NumThreads() > 1 || ams_data -> A_relax_type == 16; } if (needZ && !z) { z = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixRowStarts(A)); hypre_ParVectorInitialize(z); hypre_ParVectorSetPartitioningOwner(z,0); ams_data -> zz = z; } if (ams_data -> print_level > 0) hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(A), &my_id); /* Compatible subspace projection for problems with zero-conductivity regions. Note that this modifies the input (r.h.s.) vector b!
*/ if ( (ams_data -> B_G0) && (++ams_data->solve_counter % ( ams_data -> projection_frequency ) == 0) ) { /* hypre_printf("Projecting onto the compatible subspace...\n"); */ hypre_AMSProjectOutGradients(ams_data, b); } if (ams_data -> beta_is_zero) { switch (ams_data -> cycle_type) { case 0: hypre_sprintf(cycle,"%s","0"); break; case 1: case 3: case 5: case 7: default: hypre_sprintf(cycle,"%s","020"); break; case 2: case 4: case 6: case 8: hypre_sprintf(cycle,"%s","(0+2)"); break; case 11: case 13: hypre_sprintf(cycle,"%s","0345430"); break; case 12: hypre_sprintf(cycle,"%s","(0+3+4+5)"); break; case 14: hypre_sprintf(cycle,"%s","0(+3+4+5)0"); break; } } else { switch (ams_data -> cycle_type) { case 0: hypre_sprintf(cycle,"%s","010"); break; case 1: default: hypre_sprintf(cycle,"%s","01210"); break; case 2: hypre_sprintf(cycle,"%s","(0+1+2)"); break; case 3: hypre_sprintf(cycle,"%s","02120"); break; case 4: hypre_sprintf(cycle,"%s","(010+2)"); break; case 5: hypre_sprintf(cycle,"%s","0102010"); break; case 6: hypre_sprintf(cycle,"%s","(020+1)"); break; case 7: hypre_sprintf(cycle,"%s","0201020"); break; case 8: hypre_sprintf(cycle,"%s","0(+1+2)0"); break; case 9: hypre_sprintf(cycle,"%s","01210"); break; case 11: hypre_sprintf(cycle,"%s","013454310"); break; case 12: hypre_sprintf(cycle,"%s","(0+1+3+4+5)"); break; case 13: hypre_sprintf(cycle,"%s","034515430"); break; case 14: hypre_sprintf(cycle,"%s","01(+3+4+5)10"); break; case 20: hypre_sprintf(cycle,"%s","020"); break; } } for (i = 0; i < ams_data -> maxit; i++) { /* Compute initial residual norms */ if (ams_data -> maxit > 1 && i == 0) { hypre_ParVectorCopy(b, ams_data -> r0); hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0); r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0)); r0_norm = r_norm; b_norm = sqrt(hypre_ParVectorInnerProd(b, b)); if (b_norm) relative_resid = r_norm / b_norm; else relative_resid = r_norm; if (my_id == 0 && ams_data -> print_level > 0) {
/* NOTE(review): each iteration applies one AMS cycle (the string built above) as a preconditioner via hypre_ParCSRSubspacePrec; residual norms are only computed/printed when maxit > 1. */
hypre_printf(" relative\n"); hypre_printf(" residual factor residual\n"); hypre_printf(" -------- ------ --------\n"); hypre_printf(" Initial %e %e\n", r_norm, relative_resid); } } /* Apply the preconditioner */ hypre_ParCSRSubspacePrec(ams_data -> A, ams_data -> A_relax_type, ams_data -> A_relax_times, ams_data -> A_l1_norms ? hypre_VectorData(ams_data -> A_l1_norms) : NULL, ams_data -> A_relax_weight, ams_data -> A_omega, ams_data -> A_max_eig_est, ams_data -> A_min_eig_est, ams_data -> A_cheby_order, ams_data -> A_cheby_fraction, Ai, Bi, HBi, Pi, ri, gi, b, x, ams_data -> r0, ams_data -> g0, cycle, z); /* Compute new residual norms */ if (ams_data -> maxit > 1) { old_resid = r_norm; hypre_ParVectorCopy(b, ams_data -> r0); hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0); r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0)); if (b_norm) relative_resid = r_norm / b_norm; else relative_resid = r_norm; if (my_id == 0 && ams_data -> print_level > 0) hypre_printf(" Cycle %2d %e %f %e \n", i+1, r_norm, r_norm / old_resid, relative_resid); } if (relative_resid < ams_data -> tol) { i++; break; } } if (my_id == 0 && ams_data -> print_level > 0 && ams_data -> maxit > 1) hypre_printf("\n\n Average Convergence Factor = %f\n\n", pow((r_norm/r0_norm),(1.0/(HYPRE_Real) i))); ams_data -> num_iterations = i; ams_data -> rel_resid_norm = relative_resid; if (ams_data -> num_iterations == ams_data -> maxit && ams_data -> tol > 0.0) hypre_error(HYPRE_ERROR_CONV); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRSubspacePrec * * General subspace preconditioner for A0 y = x, based on ParCSR storage. * * P[i] and A[i] are the interpolation and coarse grid matrices for * the (i+1)'th subspace. B[i] is an AMG solver for A[i]. r[i] and g[i] * are temporary vectors. A0_* are the fine grid smoothing parameters.
 * * The default mode is multiplicative, '+' changes the next correction * to additive, based on residual computed at '('. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRSubspacePrec(/* fine space matrix */ hypre_ParCSRMatrix *A0, /* relaxation parameters */ HYPRE_Int A0_relax_type, HYPRE_Int A0_relax_times, HYPRE_Real *A0_l1_norms, HYPRE_Real A0_relax_weight, HYPRE_Real A0_omega, HYPRE_Real A0_max_eig_est, HYPRE_Real A0_min_eig_est, HYPRE_Int A0_cheby_order, HYPRE_Real A0_cheby_fraction, /* subspace matrices */ hypre_ParCSRMatrix **A, /* subspace preconditioners */ HYPRE_Solver *B, /* hypre solver functions for B */ HYPRE_PtrToSolverFcn *HB, /* subspace interpolations */ hypre_ParCSRMatrix **P, /* temporary subspace vectors */ hypre_ParVector **r, hypre_ParVector **g, /* right-hand side */ hypre_ParVector *x, /* current approximation */ hypre_ParVector *y, /* current residual */ hypre_ParVector *r0, /* temporary vector */ hypre_ParVector *g0, char *cycle, /* temporary vector */ hypre_ParVector *z) { char *op; HYPRE_Int use_saved_residual = 0;
/* NOTE(review): the cycle string is interpreted character by character: '0' smooths on the fine grid, digits >= '1' select a subspace correction (index = digit - 1), '(' snapshots the residual into r0, '+' marks the next correction as additive (it reuses the saved residual), and ')' is a no-op delimiter. */
for (op = cycle; *op != '\0'; op++) { /* do nothing */ if (*op == ')') continue; /* compute the residual: r = x - Ay */ else if (*op == '(') { hypre_ParVectorCopy(x,r0); hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, r0); } /* switch to additive correction */ else if (*op == '+') { use_saved_residual = 1; continue; } /* smooth: y += S (x - Ay) */ else if (*op == '0') { hypre_ParCSRRelax(A0, x, A0_relax_type, A0_relax_times, A0_l1_norms, A0_relax_weight, A0_omega, A0_max_eig_est, A0_min_eig_est, A0_cheby_order, A0_cheby_fraction, y, g0, z); } /* subspace correction: y += P B^{-1} P^t r */ else { HYPRE_Int i = *op - '1'; if (i < 0) hypre_error_in_arg(16); /* skip empty subspaces */ if (!A[i]) continue; /* compute the residual?
*/ if (use_saved_residual) { use_saved_residual = 0; hypre_ParCSRMatrixMatvecT(1.0, P[i], r0, 0.0, r[i]); } else { hypre_ParVectorCopy(x,g0); hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, g0); hypre_ParCSRMatrixMatvecT(1.0, P[i], g0, 0.0, r[i]); } hypre_ParVectorSetConstantValues(g[i], 0.0); (*HB[i]) (B[i], (HYPRE_Matrix)A[i], (HYPRE_Vector)r[i], (HYPRE_Vector)g[i]); hypre_ParCSRMatrixMatvec(1.0, P[i], g[i], 0.0, g0); hypre_ParVectorAxpy(1.0, g0, y); } } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSGetNumIterations * * Get the number of AMS iterations. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSGetNumIterations(void *solver, HYPRE_Int *num_iterations) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; *num_iterations = ams_data -> num_iterations; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSGetFinalRelativeResidualNorm * * Get the final relative residual norm in AMS. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSGetFinalRelativeResidualNorm(void *solver, HYPRE_Real *rel_resid_norm) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; *rel_resid_norm = ams_data -> rel_resid_norm; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSProjectOutGradients * * For problems with zero-conductivity regions, project the vector onto the * compatible subspace: x = (I - G0 (G0^t G0)^{-1} G0^T) x, where G0 is the * discrete gradient restricted to the interior nodes of the regions with * zero conductivity. This ensures that x is orthogonal to the gradients in * the range of G0. * * This function is typically called after the solution iteration is complete, * in order to facilitate the visualization of the computed field.
Without it * the values in the zero-conductivity regions contain kernel components. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSProjectOutGradients(void *solver, hypre_ParVector *x) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; if (ams_data -> B_G0) { hypre_ParCSRMatrixMatvecT(1.0, ams_data -> G0, x, 0.0, ams_data -> r1); hypre_ParVectorSetConstantValues(ams_data -> g1, 0.0); hypre_BoomerAMGSolve(ams_data -> B_G0, ams_data -> A_G0, ams_data -> r1, ams_data -> g1); hypre_ParCSRMatrixMatvec(1.0, ams_data -> G0, ams_data -> g1, 0.0, ams_data -> g0); hypre_ParVectorAxpy(-1.0, ams_data -> g0, x); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSConstructDiscreteGradient * * Construct and return the lowest-order discrete gradient matrix G, based on: * - a matrix on the edges (e.g. the stiffness matrix A) * - a vector on the vertices (e.g. the x coordinates) * - the array edge_vertex, which lists the global indexes of the * vertices of the local edges. * * We assume that edge_vertex lists the edge vertices consecutively, * and that the orientation of all edges is consistent. More specifically: * If edge_orientation = 1, the edges are already oriented. * If edge_orientation = 2, the orientation of edge i depends only on the * sign of edge_vertex[2*i+1] - edge_vertex[2*i].
*/ HYPRE_Int hypre_AMSConstructDiscreteGradient(hypre_ParCSRMatrix *A, hypre_ParVector *x_coord, HYPRE_BigInt *edge_vertex, HYPRE_Int edge_orientation, hypre_ParCSRMatrix **G_ptr) { hypre_ParCSRMatrix *G; HYPRE_Int nedges; nedges = hypre_ParCSRMatrixNumRows(A); /* Construct the local part of G based on edge_vertex and the edge and vertex partitionings from A and x_coord */ { HYPRE_Int i, *I = hypre_CTAlloc(HYPRE_Int, nedges+1, HYPRE_MEMORY_HOST); HYPRE_Int part_size; HYPRE_BigInt *row_starts, *col_starts; HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*nedges, HYPRE_MEMORY_HOST); hypre_CSRMatrix *local = hypre_CSRMatrixCreate (nedges, hypre_ParVectorGlobalSize(x_coord), 2*nedges); for (i = 0; i <= nedges; i++) I[i] = 2*i; if (edge_orientation == 1) { /* Assume that the edges are already oriented */ for (i = 0; i < 2*nedges; i+=2) { data[i] = -1.0; data[i+1] = 1.0; } } else if (edge_orientation == 2) { /* Assume that the edge orientation is based on the vertex indexes */ for (i = 0; i < 2*nedges; i+=2) { if (edge_vertex[i] < edge_vertex[i+1]) { data[i] = -1.0; data[i+1] = 1.0; } else { data[i] = 1.0; data[i+1] = -1.0; } } } else { hypre_error_in_arg(4); } hypre_CSRMatrixI(local) = I; hypre_CSRMatrixBigJ(local) = edge_vertex; hypre_CSRMatrixData(local) = data; hypre_CSRMatrixRownnz(local) = NULL; hypre_CSRMatrixOwnsData(local) = 1; hypre_CSRMatrixNumRownnz(local) = nedges; /* Copy partitioning from A and x_coord (previously they were re-used) */ part_size = 2; row_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST); col_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST); for (i = 0; i < part_size; i++) { row_starts[i] = hypre_ParCSRMatrixRowStarts(A)[i]; col_starts[i] = hypre_ParVectorPartitioning(x_coord)[i]; } /* Generate the discrete gradient matrix */ G = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A),
/* (continuation of the argument list: G has one row per edge of A and one column per vertex of x_coord) */
hypre_ParVectorGlobalSize(x_coord), row_starts, col_starts, 0, 0, 0); hypre_ParCSRMatrixOwnsRowStarts(G) = 1; hypre_ParCSRMatrixOwnsColStarts(G) = 1; hypre_CSRMatrixBigJtoJ(local); GenerateDiagAndOffd(local, G, hypre_ParVectorFirstIndex(x_coord), hypre_ParVectorLastIndex(x_coord)); /* Account for empty rows in G. These may appear when A includes only the interior (non-Dirichlet b.c.) edges. */ { hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G); G_diag->num_cols = hypre_VectorSize(hypre_ParVectorLocalVector(x_coord)); } /* Free the local matrix */ hypre_CSRMatrixDestroy(local); } *G_ptr = G; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSFEISetup * * Construct an AMS solver object based on the following data: * * A - the edge element stiffness matrix * num_vert - number of vertices (nodes) in the processor * num_local_vert - number of vertices owned by the processor * vert_number - global indexes of the vertices in the processor * vert_coord - coordinates of the vertices in the processor * num_edges - number of edges owned by the processor * edge_vertex - the vertices of the edges owned by the processor. * Vertices are in local numbering (the same as in * vert_number), and edge orientation is always from * the first to the second vertex. * * Here we distinguish between vertices that belong to elements in the * current processor, and the subset of these vertices that is owned by * the processor. * * This function is written specifically for input from the FEI and should * be called before hypre_AMSSetup().
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSFEISetup(void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *b, hypre_ParVector *x, HYPRE_Int num_vert, HYPRE_Int num_local_vert, HYPRE_BigInt *vert_number, HYPRE_Real *vert_coord, HYPRE_Int num_edges, HYPRE_BigInt *edge_vertex) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; HYPRE_Int i, j; hypre_ParCSRMatrix *G; hypre_ParVector *x_coord, *y_coord, *z_coord; HYPRE_Real *x_data, *y_data, *z_data; MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt *vert_part, num_global_vert; HYPRE_BigInt vert_start, vert_end; HYPRE_BigInt big_local_vert = (HYPRE_BigInt) num_local_vert; /* Find the processor partitioning of the vertices */ vert_part = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_MPI_Scan(&big_local_vert, &vert_part[1], 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); vert_part[0] = vert_part[1] - big_local_vert; hypre_MPI_Allreduce(&big_local_vert, &num_global_vert, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); /* Construct hypre parallel vectors for the vertex coordinates */ x_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part); hypre_ParVectorInitialize(x_coord); hypre_ParVectorOwnsData(x_coord) = 1; hypre_ParVectorOwnsPartitioning(x_coord) = 0; x_data = hypre_VectorData(hypre_ParVectorLocalVector(x_coord)); y_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part); hypre_ParVectorInitialize(y_coord); hypre_ParVectorOwnsData(y_coord) = 1; hypre_ParVectorOwnsPartitioning(y_coord) = 0; y_data = hypre_VectorData(hypre_ParVectorLocalVector(y_coord)); z_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part); hypre_ParVectorInitialize(z_coord); hypre_ParVectorOwnsData(z_coord) = 1; hypre_ParVectorOwnsPartitioning(z_coord) = 0; z_data = hypre_VectorData(hypre_ParVectorLocalVector(z_coord)); vert_start = hypre_ParVectorFirstIndex(x_coord); vert_end = hypre_ParVectorLastIndex(x_coord); /* Save coordinates of locally owned vertices
*/ for (i = 0; i < num_vert; i++) { if (vert_number[i] >= vert_start && vert_number[i] <= vert_end) { j = (HYPRE_Int)(vert_number[i] - vert_start); x_data[j] = vert_coord[3*i]; y_data[j] = vert_coord[3*i+1]; z_data[j] = vert_coord[3*i+2]; } } /* Change vertex numbers from local to global */ for (i = 0; i < 2*num_edges; i++) edge_vertex[i] = vert_number[edge_vertex[i]]; /* Construct the local part of G based on edge_vertex */ { /* HYPRE_Int num_edges = hypre_ParCSRMatrixNumRows(A); */ HYPRE_Int *I = hypre_CTAlloc(HYPRE_Int, num_edges+1, HYPRE_MEMORY_HOST); HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*num_edges, HYPRE_MEMORY_HOST); hypre_CSRMatrix *local = hypre_CSRMatrixCreate (num_edges, num_global_vert, 2*num_edges); for (i = 0; i <= num_edges; i++) I[i] = 2*i; /* Assume that the edge orientation is based on the vertex indexes */ for (i = 0; i < 2*num_edges; i+=2) { data[i] = 1.0; data[i+1] = -1.0; } hypre_CSRMatrixI(local) = I; hypre_CSRMatrixBigJ(local) = edge_vertex; hypre_CSRMatrixData(local) = data; hypre_CSRMatrixRownnz(local) = NULL; hypre_CSRMatrixOwnsData(local) = 1; hypre_CSRMatrixNumRownnz(local) = num_edges;
/* NOTE(review): local adopts the caller's edge_vertex array as its big-J column array and is destroyed below with OwnsData = 1 — presumably hypre_CSRMatrixBigJtoJ replaces/releases the big-J storage; verify the caller does not free edge_vertex again (the commented-out J = NULL line below suggests this was a known concern). */
G = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), num_global_vert, hypre_ParCSRMatrixRowStarts(A), vert_part, 0, 0, 0); hypre_ParCSRMatrixOwnsRowStarts(G) = 0; hypre_ParCSRMatrixOwnsColStarts(G) = 1; hypre_CSRMatrixBigJtoJ(local); GenerateDiagAndOffd(local, G, vert_start, vert_end); //hypre_CSRMatrixJ(local) = NULL;
hypre_CSRMatrixDestroy(local); } ams_data -> G = G; ams_data -> x = x_coord; ams_data -> y = y_coord; ams_data -> z = z_coord; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSFEIDestroy * * Free the additional memory allocated in hypre_AMSFEISetup(). * * This function is written specifically for input from the FEI and should * be called before hypre_AMSDestroy().
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSFEIDestroy(void *solver) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; if (ams_data -> G) hypre_ParCSRMatrixDestroy(ams_data -> G); if (ams_data -> x) hypre_ParVectorDestroy(ams_data -> x); if (ams_data -> y) hypre_ParVectorDestroy(ams_data -> y); if (ams_data -> z) hypre_ParVectorDestroy(ams_data -> z); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRComputeL1Norms Threads * * Compute the l1 norms of the rows of a given matrix, depending on * the option parameter: * * option 1 = Compute the l1 norm of the rows * option 2 = Compute the l1 norm of the (processor) off-diagonal * part of the rows plus the diagonal of A * option 3 = Compute the l2 norm^2 of the rows * option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid * Smoothers for Ultra-Parallel Computing" * * The above computations are done in a CF manner, whenever the provided * cf_marker is not NULL.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRComputeL1NormsThreads(hypre_ParCSRMatrix *A, HYPRE_Int option, HYPRE_Int num_threads, HYPRE_Int *cf_marker, HYPRE_Real **l1_norm_ptr) { HYPRE_Int i, j, k; HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real diag; HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, hypre_ParCSRMatrixMemoryLocation(A)); HYPRE_Int ii, ns, ne, rest, size; HYPRE_Int *cf_marker_offd = NULL; HYPRE_Int cf_diag; /* collect the cf marker data from other procs */ if (cf_marker != NULL) { HYPRE_Int index; HYPRE_Int num_sends; HYPRE_Int start; HYPRE_Int *int_buf_data = NULL; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; if (num_cols_offd) cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)) int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, cf_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } #ifdef
HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) HYPRE_SMP_SCHEDULE #endif for (k = 0; k < num_threads; k++) { size = num_rows/num_threads; rest = num_rows - size*num_threads; if (k < rest) { ns = k*size+k; ne = (k+1)*size+k+1; } else { ns = k*size+rest; ne = (k+1)*size+rest; } if (option == 1) { for (i = ns; i < ne; i++) { l1_norm[i] = 0.0; if (cf_marker == NULL) { /* Add the l1 norm of the diag part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) l1_norm[i] += fabs(A_diag_data[j]); /* Add the l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += fabs(A_offd_data[j]); } } else { cf_diag = cf_marker[i]; /* Add the CF l1 norm of the diag part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) if (cf_diag == cf_marker[A_diag_J[j]]) l1_norm[i] += fabs(A_diag_data[j]); /* Add the CF l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) if (cf_diag == cf_marker_offd[A_offd_J[j]]) l1_norm[i] += fabs(A_offd_data[j]); } } } } else if (option == 2) { for (i = ns; i < ne; i++) { l1_norm[i] = 0.0; if (cf_marker == NULL) { /* Add the diagonal and the local off-thread part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) { ii = A_diag_J[j]; if (ii == i || ii < ns || ii >= ne) l1_norm[i] += fabs(A_diag_data[j]); } /* Add the l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += fabs(A_offd_data[j]); } } else { cf_diag = cf_marker[i]; /* Add the diagonal and the local off-thread part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) { ii = A_diag_J[j]; if ((ii == i || ii < ns || ii >= ne) && (cf_diag == cf_marker[A_diag_J[j]])) l1_norm[i] += fabs(A_diag_data[j]); } /* Add the CF l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) if
(cf_diag == cf_marker_offd[A_offd_J[j]]) l1_norm[i] += fabs(A_offd_data[j]); } } } } else if (option == 3) { for (i = ns; i < ne; i++) { l1_norm[i] = 0.0; for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) l1_norm[i] += A_diag_data[j] * A_diag_data[j]; if (num_cols_offd) for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += A_offd_data[j] * A_offd_data[j]; } } else if (option == 4) { for (i = ns; i < ne; i++) { l1_norm[i] = 0.0; if (cf_marker == NULL) { /* Add the diagonal and the local off-thread part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) { ii = A_diag_J[j]; if (ii == i || ii < ns || ii >= ne) { if (ii == i) { diag = fabs(A_diag_data[j]); l1_norm[i] += fabs(A_diag_data[j]); } else l1_norm[i] += 0.5*fabs(A_diag_data[j]); } } /* Add the l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += 0.5*fabs(A_offd_data[j]); } } else { cf_diag = cf_marker[i]; /* Add the diagonal and the local off-thread part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) { ii = A_diag_J[j]; if ((ii == i || ii < ns || ii >= ne) && (cf_diag == cf_marker[A_diag_J[j]])) { if (ii == i) { diag = fabs(A_diag_data[j]); l1_norm[i] += fabs(A_diag_data[j]); } else l1_norm[i] += 0.5*fabs(A_diag_data[j]); } } /* Add the CF l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) if (cf_diag == cf_marker_offd[A_offd_J[j]]) l1_norm[i] += 0.5*fabs(A_offd_data[j]); } }
/* NOTE(review): diag is assigned only when a stored diagonal entry (ii == i) is seen in this row's loop above; if a row lacks one (or its CF test filters it out), the truncation below reads diag uninitialized — confirm all rows store their diagonal. */
/* Truncate according to Remark 6.2 */ if (l1_norm[i] <= 4.0/3.0*diag) l1_norm[i] = diag; } } else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */ { /* Set the diag element */ for (i = ns; i < ne; i++) { l1_norm[i] = A_diag_data[A_diag_I[i]]; if (l1_norm[i] == 0) l1_norm[i] = 1.0; } } if (option < 5) { /* Handle negative definite matrices */ for (i = ns; i < ne; i++) if (A_diag_data[A_diag_I[i]] < 0) l1_norm[i] = -l1_norm[i]; for (i = ns; i < ne; i++) /*
if (fabs(l1_norm[i]) < DBL_EPSILON) */ if (fabs(l1_norm[i]) == 0.0) { hypre_error_in_arg(1); break; } } } hypre_TFree(cf_marker_offd, HYPRE_MEMORY_HOST); *l1_norm_ptr = l1_norm; return hypre_error_flag; }
kmp_aligned_malloc.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <stdint.h> #include <omp.h> #include "omp_testsuite.h" int alignments[] = {64, 128, 256, 512, 1024, 2048, 4096}; unsigned aligned_by(uint64_t addr) { uint64_t alignment = 1; while((addr & (alignment-1)) == 0) { alignment <<= 1; } return (alignment >> 1); } int test_kmp_aligned_malloc() { int err = 0; #pragma omp parallel shared(err) { int i; int* ptr; uint64_t addr; int tid = omp_get_thread_num(); for(i = 0; i < sizeof(alignments)/sizeof(int); i++) { int alignment = alignments[i]; // allocate 64 bytes with 64-byte alignment // allocate 128 bytes with 128-byte alignment, etc. ptr = (int*)kmp_aligned_malloc(alignment, alignment); addr = (uint64_t)ptr; if(addr & (alignment-1)) { printf("thread %d: addr = %p (aligned to %u bytes) but expected " " alignment = %d\n", tid, ptr, aligned_by(addr), alignment); err = 1; } kmp_free(ptr); } ptr = kmp_aligned_malloc(128, 127); if (ptr != NULL) { printf("thread %d: kmp_aligned_malloc() didn't return NULL when " "alignment was not power of 2\n", tid); err = 1; } } /* end of parallel */ return !err; } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_kmp_aligned_malloc()) { num_failed++; } } return num_failed; }
barrier.c
#include<stdio.h>

/* Emit one greeting line; invoked on both sides of the barrier. */
void do_sth()
{
    fputs("hello.\n", stdout);
}

int main(void)
{
    /* Each thread greets, synchronizes with the whole team at the
       barrier, then greets once more. */
    #pragma omp parallel
    {
        do_sth();
        #pragma omp barrier
        do_sth();
    }
    return 0;
}
bins_dynamic_mpi.h
// // Project Name: Kratos // Last Modified by: $Author: pooyan $ // Date: $Date: 2007-03-27 17:02:19 $ // Revision: $Revision: 1.1.1.1 $ // // #if !defined(KRATOS_BINS_DYNAMIC_MPI_CONTAINER_H_INCLUDE) #define KRATOS_BINS_DYNAMIC_MPI_CONTAINER_H_INCLUDE #include "mpi.h" #include "spatial_containers/tree.h" #include "includes/serializer.h" #include "utilities/timer.h" namespace Kratos { /// This class its an implementation of BinsDynamic using MPI /** * Use the seam way you use the generic BinsDynamic */ template<class TConfigure> class BinsDynamicMpi { public: enum { Dimension = TConfigure::Dimension }; typedef TConfigure Configure; typedef typename TConfigure::PointType PointType; typedef typename TConfigure::PointVector ContainerType; typedef typename TConfigure::PointIterator IteratorType; typedef typename TConfigure::DistanceIterator DistanceIteratorType; typedef typename TConfigure::PtrPointType PointerType; typedef typename TConfigure::DistanceFunction DistanceFunction; typedef std::vector<PointerType> PointVector; typedef std::vector<PointVector> CellsContainerType; typedef typename PointVector::iterator PointIterator; typedef TreeNode<Dimension,PointType,PointerType,IteratorType,DistanceIteratorType> TreeNodeType; typedef typename TreeNodeType::CoordinateType CoordinateType; // double typedef typename TreeNodeType::SizeType SizeType; // std::size_t typedef typename TreeNodeType::IndexType IndexType; // std::size_t typedef TreeNodeType LeafType; typedef typename TreeNodeType::IteratorIteratorType IteratorIteratorType; typedef typename TreeNodeType::SearchStructureType SearchStructureType; typedef Tvector<IndexType,Dimension> CellType; typedef Kratos::SearchUtils::SearchNearestInRange<PointType,PointerType,PointIterator,DistanceFunction,CoordinateType> SearchNearestInRange; typedef Kratos::SearchUtils::SearchRadiusInRange<PointType,PointIterator,DistanceIteratorType,DistanceFunction,SizeType,CoordinateType,IteratorType> SearchRadiusInRange; typedef 
Kratos::SearchUtils::SearchBoxInRange<PointType,PointIterator,SizeType,Dimension,IteratorType> SearchBoxInRange; typedef Kratos::SearchUtils::SquaredDistanceFunction<Dimension,PointType> SquaredDistanceFunction; /// Pointer definition of BinsDynamicMpi KRATOS_CLASS_POINTER_DEFINITION(BinsDynamicMpi); /// Default constructor. /** * Empy constructor, you shouldn't use this unless you know what you are doing. */ BinsDynamicMpi() : mPointBegin(this->NullIterator()), mPointEnd(this->NullIterator()), mNumPoints(0) {}; /// ModelPart Constructor. /** * Creates and initializes BinsDynamic using the LocalMesh of the ModelPart provided as argument. * @param StaticMesh The geometry used to generate the Bins * @param ParticMesh Not used atm * @param BoxSize Size of the box * @param BucketSize default = 1 */ BinsDynamicMpi( ModelPart * StaticMesh, ModelPart * ParticMesh, CoordinateType BoxSize, SizeType BucketSize = 1 ) { MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); IteratorType mPointIterator; this->StaticMesh = StaticMesh; if (mpi_size != 1) { mPointBegin = new PointType* [StaticMesh->GetCommunicator().LocalMesh().NumberOfNodes()]; mPointEnd = mPointBegin + StaticMesh->GetCommunicator().LocalMesh().NumberOfNodes(); std::cout << "Local Mesh has: " << StaticMesh->GetCommunicator().LocalMesh().NumberOfNodes() << " Nodes" << std::endl; std::cout << "Ghost Mesh has: " << StaticMesh->GetCommunicator().GhostMesh().NumberOfNodes() << " Nodes" << std::endl; std::cout << "Full Mesh has: " << StaticMesh->NumberOfNodes() << " Nodes" << std::endl; mPointIterator = mPointBegin; for( ModelPart::NodesContainerType::iterator inode = StaticMesh->GetCommunicator().LocalMesh().NodesBegin(); inode != StaticMesh->GetCommunicator().LocalMesh().NodesEnd(); inode++, mPointIterator++) { PointType auxPoint; auxPoint[0] = inode->X(); auxPoint[1] = inode->Y(); auxPoint[2] = inode->Z(); (*mPointIterator) = new PointType(auxPoint); // std::cout << "(" << mpi_rank << ") 
"<< inode->Id() << " - " << inode->X() << " " << inode->Y() << " " << inode->Z() << std::endl; } } else { mPointBegin = new PointType* [StaticMesh->NumberOfNodes()]; mPointEnd = mPointBegin + StaticMesh->NumberOfNodes(); mPointIterator = mPointBegin; for( ModelPart::NodesContainerType::iterator inode = StaticMesh->NodesBegin(); inode != StaticMesh->NodesEnd(); inode++, mPointIterator++) { PointType auxPoint; auxPoint[0] = inode->X(); auxPoint[1] = inode->Y(); auxPoint[2] = inode->Z(); (*mPointIterator) = new PointType(auxPoint); } } if(mPointBegin==mPointEnd) return; mNumPoints = std::distance(mPointBegin,mPointEnd); CalculateBoundingBox(); CalculateCellSize(BoxSize); AllocateCellsContainer(); GenerateBins(); GenerateCommunicationGraph(); } //************************************************************************ /// Destructor. virtual ~BinsDynamicMpi() { char msg[12] = {'b','i','n','s','_','X','.','t','i','m','e','\0'}; msg[5] = '0' + mpi_rank; Timer::SetOuputFile(msg); Timer::PrintTimingInformation(); } //************************************************************************ /// Pointer to first element. IteratorType Begin() { return mPointBegin; } //************************************************************************ /// Pointer to last element. IteratorType End() { return mPointBegin; } //************************************************************************ /// Size of specific dimension. CoordinateType CellSize( SizeType const& iDim ) { return mCellSize[iDim]; } //************************************************************************ /// Number of cells of specific dimension. 
SizeType NumCell( SizeType const& iDim ) { return mN[iDim]; } //************************************************************************ /// Calcutes bounding box of particles in bins void CalculateBoundingBox() { for(SizeType i = 0 ; i < Dimension ; i++) { mMinPoint[i] = (**mPointBegin)[i]; mMaxPoint[i] = (**mPointBegin)[i]; } for(IteratorType Point = mPointBegin ; Point != mPointEnd ; Point++) { for(SizeType i = 0 ; i < Dimension ; i++) { if( (**Point)[i] < mMinPoint[i] ) mMinPoint[i] = (**Point)[i]; if( (**Point)[i] > mMaxPoint[i] ) mMaxPoint[i] = (**Point)[i]; } } } //************************************************************************ /// Calcutes cell Size void CalculateCellSize() { CoordinateType delta[Dimension]; CoordinateType alpha[Dimension]; CoordinateType mult_delta = 1.00; SizeType index = 0; for(SizeType i = 0 ; i < Dimension ; i++) { delta[i] = mMaxPoint[i] - mMinPoint[i]; if ( delta[i] > delta[index] ) index = i; delta[i] = (delta[i] == 0.00) ? 1.00 : delta[i]; } for(SizeType i = 0 ; i < Dimension ; i++){ alpha[i] = delta[i] / delta[index]; mult_delta *= alpha[i]; } mN[index] = static_cast<SizeType>( pow(static_cast<CoordinateType>(SearchUtils::PointerDistance(mPointBegin,mPointEnd)/mult_delta), 1.00/Dimension)+1 ); for(SizeType i = 0 ; i < Dimension ; i++){ if(i!=index) { mN[i] = static_cast<SizeType>(alpha[i] * mN[index]); mN[i] = ( mN[i] == 0 ) ? 
1 : mN[i]; } } // for(SizeType i = 0 ; i < Dimension ; i++){ // mN[i] *= mpi_size; // } for(SizeType i = 0 ; i < Dimension ; i++){ mCellSize[i] = delta[i] / mN[i]; mInvCellSize[i] = 1.00 / mCellSize[i]; } } //************************************************************************ /// Calcutes cell Size gived the container box void CalculateCellSize( CoordinateType BoxSize ) { for(SizeType i = 0 ; i < Dimension ; i++){ mCellSize[i] = BoxSize; mInvCellSize[i] = 1.00 / mCellSize[i]; mN[i] = static_cast<SizeType>( (mMaxPoint[i]-mMinPoint[i]) / mCellSize[i]) + 1; } } //************************************************************************ void AllocateCellsContainer() { SizeType Size = 1; for(SizeType i = 0 ; i < Dimension ; i++) Size *= mN[i]; // Resize Global Container mPoints.resize(Size); } //************************************************************************ void GenerateBins(){ for(IteratorType i_point = mPointBegin ; i_point != mPointEnd ; i_point++) mPoints[CalculateIndex(**i_point)].push_back(*i_point); } void GenerateCommunicationGraph() { double * MpiMinPoints = new double[mpi_size * Dimension]; double * MpiMaxPoints = new double[mpi_size * Dimension]; double * MyMinPoint = new double[Dimension]; double * MyMaxPoint = new double[Dimension]; for(size_t i = 0; i < Dimension; i++) { MyMinPoint[i] = mMinPoint[i]; MyMaxPoint[i] = mMaxPoint[i]; } mpi_connectivity = vector<int>(mpi_size); mpi_MinPoints = vector<vector<double> >(mpi_size, vector<double>(Dimension)); mpi_MaxPoints = vector<vector<double> >(mpi_size, vector<double>(Dimension)); MPI_Allgather(MyMinPoint,Dimension,MPI_DOUBLE,MpiMinPoints,Dimension,MPI_DOUBLE,MPI_COMM_WORLD); MPI_Allgather(MyMaxPoint,Dimension,MPI_DOUBLE,MpiMaxPoints,Dimension,MPI_DOUBLE,MPI_COMM_WORLD); for(int i = 0; i < mpi_size; i++) { mpi_connectivity[i] = 0; for(size_t j = 0; j < Dimension; j++) { mpi_MinPoints[i][j] = MpiMinPoints[i * Dimension + j]; mpi_MaxPoints[i][j] = MpiMaxPoints[i * Dimension + j]; } } delete [] 
MpiMinPoints; delete [] MpiMaxPoints; delete [] MyMinPoint; delete [] MyMaxPoint; } void PrepareCommunications(int * NumberOfSendElements,int * NumberOfRecvElements,int * msgSendSize,int * msgRecvSize) { MPI_Alltoall(msgSendSize,1,MPI_INT,msgRecvSize,1,MPI_INT,MPI_COMM_WORLD); MPI_Alltoall(NumberOfSendElements,1,MPI_INT,NumberOfRecvElements,1,MPI_INT,MPI_COMM_WORLD); } void AsyncSendAndRecive(std::string messages[],int * msgSendSize,int * msgRecvSize) { int NumberOfCommunicationEvents = 0; int NumberOfCommunicationEventsIndex = 0; char * recvBuffers[mpi_size]; for(int j = 0; j < mpi_size; j++) { if(j != mpi_rank && msgRecvSize[j]) NumberOfCommunicationEvents++; if(j != mpi_rank && msgSendSize[j]) NumberOfCommunicationEvents++; } MPI_Request * reqs = new MPI_Request[NumberOfCommunicationEvents]; MPI_Status * stats = new MPI_Status[NumberOfCommunicationEvents]; //Set up all receive and send events for(int i = 0; i < mpi_size; i++) { if(i != mpi_rank && msgRecvSize[i]) { recvBuffers[i] = (char *)malloc(sizeof(char) * msgRecvSize[i]); MPI_Irecv(recvBuffers[i],msgRecvSize[i],MPI_CHAR,i,0,MPI_COMM_WORLD,&reqs[NumberOfCommunicationEventsIndex++]); } if(i != mpi_rank && msgSendSize[i]) { char * mpi_send_buffer = (char *)malloc(sizeof(char) * msgSendSize[i]); memcpy(mpi_send_buffer,messages[i].c_str(),msgSendSize[i]); MPI_Isend(mpi_send_buffer,msgSendSize[i],MPI_CHAR,i,0,MPI_COMM_WORLD,&reqs[NumberOfCommunicationEventsIndex++]); } } //wait untill all communications finish MPI_Waitall(NumberOfCommunicationEvents, reqs, stats); for(int i = 0; i < mpi_size; i++) { if(i != mpi_rank && msgRecvSize[i]) messages[i] = std::string(recvBuffers[i],msgRecvSize[i]); } delete [] reqs; delete [] stats; } //************************************************************************ IndexType CalculatePosition( CoordinateType const& ThisCoord, SizeType ThisDimension ) { CoordinateType d_index = (ThisCoord - mMinPoint[ThisDimension]) * mInvCellSize[ThisDimension]; IndexType index = 
static_cast<IndexType>( (d_index < 0.00) ? 0.00 : d_index ); return (index > mN[ThisDimension]-1) ? mN[ThisDimension]-1 : index; } //************************************************************************ IndexType CalculateIndex( PointType const& ThisPoint ) { IndexType Index = 0; for(SizeType iDim = Dimension-1 ; iDim > 0 ; iDim--){ Index += CalculatePosition(ThisPoint[iDim],iDim); Index *= mN[iDim-1]; } Index += CalculatePosition(ThisPoint[0],0); return Index; } //************************************************************************ IndexType CalculateIndex( CellType const& ThisIndex ) { IndexType Index = 0; for(SizeType iDim = Dimension-1 ; iDim > 0 ; iDim--){ Index += ThisIndex[iDim]; Index *= mN[iDim-1]; } Index += ThisIndex[0]; return Index; } //************************************************************************ CellType CalculateCell( PointType const& ThisPoint ){ CellType Cell; for(SizeType i = 0 ; i < Dimension ; i++) Cell[i] = CalculatePosition(ThisPoint[i],i); return Cell; } CellType CalculateCell( PointType const& ThisPoint, CoordinateType Radius ){ CellType Cell; for(SizeType i = 0 ; i < Dimension ; i++) Cell[i] = CalculatePosition(ThisPoint[i]+Radius,i); return Cell; } //************************************************************************ void AddPoint( PointerType const& ThisPoint ){ mPoints[CalculateIndex(*ThisPoint)].push_back(ThisPoint); mNumPoints++; } //************************************************************************ void MPI_ExistPoint( PointerType const& ThisPoint, PointerType ResultNearest, CoordinateType const Tolerance = static_cast<CoordinateType>(10.0*DBL_EPSILON) ) { PointerType Nearest, remoteNearest[mpi_size], resultNearest[mpi_size], remoteThisPoint[mpi_size]; CoordinateType Distance, remoteDistance[mpi_size], resultDistance[mpi_size]; bool Found, remoteFound[mpi_size], resultFound[mpi_size]; int msgSendSize = 0; int msgRecvSize = 0; int msgResSendSize = 0; int msgResRecvSize = 0; std::cout << "(" << mpi_rank << 
") --- " << (*ThisPoint) << " --- " << std::endl; Serializer particleSerializer; particleSerializer.save("nodes",ThisPoint); std::stringstream* serializer_buffer; serializer_buffer = (std::stringstream *)particleSerializer.pGetBuffer(); msgSendSize = serializer_buffer->str().size(); MPI_Allreduce(&msgSendSize,&msgRecvSize,1,MPI_INT,MPI_MAX,MPI_COMM_WORLD); char * mpi_send_buffer = new char[(msgRecvSize+1)]; char * mpi_recv_buffer = new char[(msgRecvSize+1) * mpi_size]; strcpy (mpi_send_buffer, serializer_buffer->str().c_str()); mpi_send_buffer[msgSendSize] = '\0'; MPI_Allgather(mpi_send_buffer,(msgRecvSize+1),MPI_CHAR,mpi_recv_buffer,(msgRecvSize+1),MPI_CHAR,MPI_COMM_WORLD); for(int i = 0; i < mpi_size; i++) { Serializer recvParticleSerializer; serializer_buffer = (std::stringstream *)recvParticleSerializer.pGetBuffer(); for(int j = 0; mpi_recv_buffer[(msgRecvSize+1)*i+j] != '\0'; j++) { (*serializer_buffer) << mpi_recv_buffer[(msgRecvSize+1)*i+j]; } remoteThisPoint[i] = new PointType(); remoteNearest[i] = new PointType(); remoteDistance[i] = static_cast<CoordinateType>(DBL_MAX); recvParticleSerializer.load("nodes",remoteThisPoint[i]); std::cout << "(" << mpi_rank << ")" << " Restored Par: " << "(" << remoteThisPoint[i]->X() << " " << remoteThisPoint[i]->Y() << " " << remoteThisPoint[i]->Z() << ")" << std::endl; SearchStructureType remote_Box( CalculateCell(*remoteThisPoint[i],-Tolerance), CalculateCell(*remoteThisPoint[i],Tolerance), mN ); SearchNearestInBox( *remoteThisPoint[i], remoteNearest[i], remoteDistance[i], remote_Box, remoteFound[i] ); std::cout << "(" << mpi_rank << ") Found point: (" << remoteThisPoint[i]->X() << " " << remoteThisPoint[i]->Y() << " " << remoteThisPoint[i]->Z() << ") from process(" << i << "): " << (*(remoteNearest[i])) << " with dist: " << remoteDistance[i] << std::endl; } std::stringstream * res_serializer_buffer[mpi_size]; for(int i = 0; i < mpi_size; i++) { Serializer resSerializer; resSerializer.save("nodes",remoteNearest[i]); 
res_serializer_buffer[i] = (std::stringstream *)resSerializer.pGetBuffer(); msgResSendSize = res_serializer_buffer[i]->str().size(); msgResSendSize = msgResSendSize > msgResRecvSize ? msgResSendSize : msgResRecvSize; MPI_Allreduce(&msgResSendSize,&msgResRecvSize,1,MPI_INT,MPI_MAX,MPI_COMM_WORLD); } char mpi_res_send_buffer[((msgResRecvSize + 1) * mpi_size)]; char mpi_res_recv_buffer[((msgResRecvSize + 1) * mpi_size)]; for(int i = 0; i < mpi_size; i++) { strcpy(&mpi_res_send_buffer[(msgResRecvSize + 1) * i], res_serializer_buffer[i]->str().c_str()); mpi_res_send_buffer[(msgResRecvSize + 1) * i + res_serializer_buffer[i]->str().size()] = '\0'; } MPI_Alltoall(mpi_res_send_buffer,(msgResRecvSize+1),MPI_CHAR,mpi_res_recv_buffer,(msgResRecvSize+1),MPI_CHAR,MPI_COMM_WORLD); MPI_Alltoall(remoteDistance,1,MPI_DOUBLE,resultDistance,1,MPI_DOUBLE,MPI_COMM_WORLD); MPI_Alltoall(remoteFound,1,MPI_BYTE,resultFound,1,MPI_BYTE,MPI_COMM_WORLD); for (int i = 0; i < mpi_size; i++) { Serializer recvResParticleSerializer; serializer_buffer = (std::stringstream *)recvResParticleSerializer.pGetBuffer(); for(int j = 0; mpi_res_recv_buffer[(msgResRecvSize+1)*i+j] != '\0'; j++) { (*serializer_buffer) << mpi_res_recv_buffer[(msgResRecvSize+1)*i+j]; } resultNearest[i] = new PointType(); recvResParticleSerializer.load("nodes",resultNearest[i]); std::cout << "(" << mpi_rank << ") Result point from process (" << i << "): (" << resultNearest[i]->X() << " " << resultNearest[i]->Y() << " " << resultNearest[i]->Z() << ") with dist: " << resultDistance[i] << std::endl; } Nearest = resultNearest[0]; Distance = resultDistance[0]; Found = resultFound[0]; for(int i = 1; i < mpi_size; i++) { if(resultFound[i] && resultDistance[i] < Distance) { Nearest = resultNearest[0]; Distance = resultDistance[0]; Found = resultFound[0]; } } ResultNearest = this->NullPointer(); if(Found) ResultNearest = Nearest; delete [] mpi_send_buffer; delete [] mpi_recv_buffer; } 
//************************************************************************ /////////////////////////////////////////////////////////////////////////// // MPI Single Input Search /////////////////////////////////////////////////////////////////////////// void MPISingleSearchInRadiusTest(const SizeType& NumberOfPoints, const SizeType& MaxNumberOfResults, const double& Radius, const SizeType& times) { PointerType PointInput = new PointType[NumberOfPoints]; for(int i = 0; i < NumberOfPoints; i++) { PointType temp; temp[0] = (i+1)/NumberOfPoints; temp[1] = (i+1)/NumberOfPoints; temp[2] = 0; PointInput[i] = PointType(temp); } std::vector<SizeType> NumberOfResults(NumberOfPoints); std::vector<std::vector<PointerType> > Results(NumberOfPoints, std::vector<PointerType>(MaxNumberOfResults)); std::vector<std::vector<double> > ResultsDistances(NumberOfPoints, std::vector<double>(MaxNumberOfResults,0)); MPI_SearchInRadius(&PointInput[NumberOfPoints/2], Radius, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults); MPI_Barrier(MPI_COMM_WORLD); } void MPI_SearchInRadius( PointerType const& ThisPoints, CoordinateType const& Radius, std::vector<std::vector<PointerType> >& Results, std::vector<std::vector<double> >& ResultsDistances, std::vector<SizeType>& NumberOfResults, SizeType const& MaxNumberOfResults) { CoordinateType Radius2 = Radius * Radius; SearchInRadiusMpiWrapperSingle( ThisPoints, 1, Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults ); } //************************************************************************ // SizeType MPI_SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, IteratorType Results, // DistanceIteratorType ResultsDistances, SizeType const& MaxNumberOfResults, SearchStructureType& Box ) // { // CoordinateType Radius2 = Radius * Radius; // SizeType NumberOfResults = 0; // Box.Set( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN ); // SearchInRadiusMpiWrapper( ThisPoint, 
Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults, Box ); // return NumberOfResults; // } //************************************************************************ // SizeType MPI_SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results, // DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults ) // { // SearchStructureType Box( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN ); // SearchInRadiusMpiWrapper( ThisPoint, Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults, Box); // } //************************************************************************ // SizeType MPI_SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results, // DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults, SearchStructureType& Box ) // { // Box.Set( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN ); // SearchInRadiusMpiWrapper( ThisPoint, Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults, Box); // } /////////////////////////////////////////////////////////////////////////// // MPI Single Input END /////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////// // MPI Multiple Input Search /////////////////////////////////////////////////////////////////////////// void MPIMultiSearchInRadiusTest(const SizeType& NumberOfPoints, const SizeType& MaxNumberOfResults, const double& Radius, const SizeType& times) { //MultiSearch Test Timer::Start("ALL"); PointerType PointInput = new PointType[NumberOfPoints]; for (ModelPart::ElementsContainerType::iterator el_it = StaticMesh->ElementsBegin(); el_it != StaticMesh->ElementsEnd() && 
el_it-StaticMesh->ElementsBegin() < NumberOfPoints; el_it++) { PointType temp; Geometry<Node < 3 > >& geom = el_it->GetGeometry(); temp[0] = (geom[0].X() + geom[1].X() + geom[2].X() + geom[3].X())/4; temp[1] = (geom[0].Y() + geom[1].Y() + geom[2].Y() + geom[3].Y())/4; temp[2] = (geom[0].Z() + geom[1].Z() + geom[2].Z() + geom[3].Z())/4; PointInput[el_it-StaticMesh->ElementsBegin()] = PointType(temp); } int rest = 0; for(size_t i = 0; i < times; i++) { std::vector<SizeType> NumberOfResults(NumberOfPoints); std::vector<std::vector<PointerType> > Results(NumberOfPoints, std::vector<PointerType>(MaxNumberOfResults)); std::vector<std::vector<double> > ResultsDistances(NumberOfPoints, std::vector<double>(MaxNumberOfResults,0)); MPI_SearchInRadius(PointInput, NumberOfPoints, Radius, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults); MPI_Barrier(MPI_COMM_WORLD); if(i == times-1) { size_t max = 0; size_t min = MaxNumberOfResults; //Check Results for(size_t i = 0; i < NumberOfPoints; i++) { rest += NumberOfResults[i]; max = NumberOfResults[i] > max ? NumberOfResults[i] : max; min = NumberOfResults[i] < min ? NumberOfResults[i] : min; } std::cout << "(" << mpi_rank << ") Found: " << rest << " results, aprox " << rest/NumberOfPoints << " results per point. 
Max: " << max << " Min: " << min << std::endl; } } int total_size = 0; int total_resu = 0; int local_size = NumberOfPoints; int local_resu = rest; MPI_Allreduce(&local_size,&total_size,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD); MPI_Allreduce(&local_resu,&total_resu,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD); if(mpi_rank == 0) std::cout << "Total Point search: " << total_size << " " << total_resu << std::endl; Timer::Stop("ALL"); } void MPI_SearchInRadius( PointerType const& ThisPoints, SizeType const& NumberOfPoints, CoordinateType const& Radius, std::vector<std::vector<PointerType> >& Results, std::vector<std::vector<double> >& ResultsDistances, std::vector<SizeType>& NumberOfResults, SizeType const& MaxNumberOfResults) { CoordinateType Radius2 = Radius * Radius; SearchInRadiusMpiWrapperSingle( ThisPoints, NumberOfPoints, Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults ); } /// Act as wrapper between external function and its implementation /** * This function provides all mpi functionality requiered to execute the parallel multi input searchInRaidus. * the method implemented by this function is one-to-many. 
It means all particles not found in the local * processes are send to all process intersecting the search radius of the particle * @param ThisPoints List of points to be search * @param NumberOfPoints Number of points to be search * @param Radius Radius of search * @param Radius2 Radius of search^2 * @param Results List of results * @param ResultsDistances Distance of the results * @param NumberOfResults Number of results * @param MaxNumberOfResults Maximum number of results returned for each point */ void SearchInRadiusMpiWrapperSingle( PointerType const& ThisPoints, SizeType const& NumberOfPoints, CoordinateType const& Radius, CoordinateType const& Radius2, std::vector<std::vector<PointerType> >& Results, std::vector<std::vector<double> >& ResultsDistances, std::vector<SizeType>& NumberOfResults, SizeType const& MaxNumberOfResults ) { std::vector<std::vector<PointerType> > remoteResults(mpi_size, std::vector<PointerType>(0)); std::vector<std::vector<PointerType> > SearchPetitions(mpi_size, std::vector<PointerType>(0)); std::vector<std::vector<PointerType> > SearchResults(mpi_size, std::vector<PointerType>(0)); std::vector<std::vector<PointerType> > SendPointToProcess(mpi_size, std::vector<PointerType>(0)); std::string messages[mpi_size]; int NumberOfSendPoints[mpi_size]; int NumberOfRecvPoints[mpi_size]; std::vector<bool> SendPoint(NumberOfPoints*mpi_size); int msgSendSize[mpi_size]; int msgRecvSize[mpi_size]; PointerType CommunicationToken = new PointType(); CommunicationToken->X() = std::numeric_limits<double>::max(); for(int i = 0; i < mpi_size; i++) { NumberOfSendPoints[i] = 0; msgSendSize[i] = 0; } //Local search Timer::Start("Calculate Local"); for(size_t i = 0; i < NumberOfPoints; i++) { IteratorType ResultsPointer = &Results[i][0]; double * ResultsDistancesPointer = &ResultsDistances[i][0]; NumberOfResults[i] = 0; SearchStructureType Box( CalculateCell(ThisPoints[i],-Radius), CalculateCell(ThisPoints[i],Radius), mN ); 
SearchInRadiusLocal(ThisPoints[i],Radius,Radius2,ResultsPointer,ResultsDistancesPointer,NumberOfResults[i],MaxNumberOfResults,Box); //For each point with results < MaxResults and each process excluding ourself if(NumberOfResults[i] < MaxNumberOfResults) { for(int j = 0; j < mpi_size; j++) { if(j != mpi_rank) { int intersect = 0; for(size_t k = 0; k < Dimension; k++) if((ThisPoints[i][k]+Radius >= mpi_MaxPoints[j][k] && ThisPoints[i][k]-Radius <= mpi_MaxPoints[j][k]) || (ThisPoints[i][k]+Radius >= mpi_MinPoints[j][k] && ThisPoints[i][k]-Radius <= mpi_MinPoints[j][k]) || (ThisPoints[i][k]-Radius >= mpi_MinPoints[j][k] && ThisPoints[i][k]+Radius <= mpi_MaxPoints[j][k]) ) intersect++; SendPoint[j*NumberOfPoints+i] = 0; if(intersect == Dimension) { SendPoint[j*NumberOfPoints+i]=1; NumberOfSendPoints[j]++; } } } } } for(int i = 0; i < mpi_size; i++) { if(i != mpi_rank && NumberOfSendPoints[i]) { int k = 0; SendPointToProcess[i].resize(NumberOfSendPoints[i]); for(size_t j = 0; j < NumberOfPoints; j++) if(SendPoint[i*NumberOfPoints+j]) SendPointToProcess[i][k++] = &ThisPoints[j]; } } Timer::Stop("Calculate Local"); Timer::Start("Transfer Particles"); // for(int i = 0; i < mpi_size; i++) // { // if(mpi_rank != i) // TConfigure::Save(SendPointToProcess[i],messages[i]); // msgSendSize[i] = messages[i].size(); // } // // PrepareCommunications(msgSendSize,msgRecvSize,NumberOfSendPoints,NumberOfRecvPoints); // AsyncSendAndRecive(messages,msgSendSize,msgRecvSize); // // for(int i = 0; i < mpi_size; i++) // { // if(mpi_rank != i && messages[i].size()) // TConfigure::Load(SearchPetitions[i],messages[i]); // } TConfigure::AsyncSendAndRecive(SendPointToProcess,SearchPetitions,msgSendSize,msgRecvSize); Timer::Stop("Transfer Particles"); Timer::Start("Calculate Remote"); //Calculate remote points for(int i = 0; i < mpi_size; i++) { if(i != mpi_rank && msgRecvSize[i]) { int accum_results = 0; NumberOfRecvPoints[i] = SearchPetitions[i].size(); std::vector<PointerType>& 
remoteSearchPetitions = SearchPetitions[i]; remoteResults[i].resize((MaxNumberOfResults+1)*NumberOfRecvPoints[i]); for(int j = 0; j < NumberOfRecvPoints[i]; j++) { IteratorType remoteResultsPointer = &remoteResults[i][accum_results]; PointType remotePointPointer = *remoteSearchPetitions[j]; SizeType thisNumberOfResults = 0; SearchStructureType Box( CalculateCell(remotePointPointer,-Radius), CalculateCell(remotePointPointer,Radius), mN ); SearchInRadiusLocal(remotePointPointer,Radius,Radius2,remoteResultsPointer,thisNumberOfResults,MaxNumberOfResults,Box); accum_results += thisNumberOfResults; remoteResults[i][accum_results++] = CommunicationToken; } remoteResults[i].resize(accum_results); NumberOfSendPoints[i] = accum_results; } } Timer::Stop("Calculate Remote"); Timer::Start("Transfer Results"); for(int i = 0; i < mpi_size; i++) { if(mpi_rank != i) TConfigure::Save(remoteResults[i],messages[i]); msgSendSize[i] = messages[i].size(); } PrepareCommunications(msgSendSize,msgRecvSize,NumberOfSendPoints,NumberOfRecvPoints); AsyncSendAndRecive(messages,msgSendSize,msgRecvSize); for(int i = 0; i < mpi_size; i++) { if(mpi_rank != i && messages[i].size()) TConfigure::Load(SearchResults[i],messages[i]); } Timer::Stop("Transfer Results"); Timer::Start("Prepare-C"); for (int i = 0; i < mpi_size; i++) { if (i != mpi_rank) { std::vector<PointerType>& remoteSearchResults = SearchResults[i]; int result_iterator = 0; for(size_t j = 0; j < NumberOfPoints; j++) { if(SendPoint[i*NumberOfPoints+j]) { int token = 0; for(; !token && result_iterator < NumberOfRecvPoints[i]; result_iterator++) { PointType& a = ThisPoints[j]; PointType& b = *remoteSearchResults[result_iterator]; if(b.X() == std::numeric_limits<double>::max()) token = 1; if (!token) { double dist = DistanceFunction()(a,b); if (dist <= Radius2) { if (NumberOfResults[j] < MaxNumberOfResults) { Results[j][NumberOfResults[j]] = remoteSearchResults[result_iterator]; ResultsDistances[j][NumberOfResults[j]] = dist; 
NumberOfResults[j]++; } } } } } } } } Timer::Stop("Prepare-C"); } /////////////////////////////////////////////////////////////////////////// // MPI Search In Radius /////////////////////////////////////////////////////////////////////////// PointerType ExistPoint( PointerType const& ThisPoint, CoordinateType const Tolerance = static_cast<CoordinateType>(10.0*DBL_EPSILON) ) { PointerType Nearest; CoordinateType Distance = static_cast<CoordinateType>(DBL_MAX); bool Found; SearchStructureType Box( CalculateCell(*ThisPoint,-Tolerance), CalculateCell(*ThisPoint,Tolerance), mN ); SearchNearestInBox( *ThisPoint, Nearest, Distance, Box, Found ); if(Found) return Nearest; return this->NullPointer(); } //************************************************************************ PointerType SearchNearestPoint( PointType const& ThisPoint ) { if( mPointBegin == mPointEnd ) return this->NullPointer(); PointerType Result = *mPointBegin; CoordinateType ResultDistance = static_cast<CoordinateType>(DBL_MAX); SearchStructureType Box( CalculateCell(ThisPoint), mN ); SearchNearestPointLocal( ThisPoint, Result, ResultDistance, Box ); return Result; } //************************************************************************ PointerType SearchNearestPoint( PointType const& ThisPoint, CoordinateType ResultDistance ) { if( mPointBegin == mPointEnd ) return this->NullPointer(); PointerType Result = *mPointBegin; ResultDistance = static_cast<CoordinateType>(DBL_MAX); SearchStructureType Box( CalculateCell(ThisPoint), mN ); SearchNearestPointLocal( ThisPoint, Result, ResultDistance, Box); return Result; } //************************************************************************ // New Thread Safe!!! 
/// Thread-safe nearest-point search using a caller-provided search box
/// (no shared mutable state besides the bins themselves).
/// @param rResultDistance Out: squared distance to the returned point.
PointerType SearchNearestPoint( PointType const& ThisPoint, CoordinateType& rResultDistance, SearchStructureType& Box )
{
    PointerType Result = *mPointBegin; //static_cast<PointerType>(NULL);
    rResultDistance = static_cast<CoordinateType>(DBL_MAX);
    Box.Set( CalculateCell(ThisPoint), mN );
    SearchNearestPointLocal( ThisPoint, Result, rResultDistance, Box);
    return Result;
}

//************************************************************************

/// Nearest-point search with a locally-constructed search box; results are
/// written to the rResult / rResultDistance output parameters.
void SearchNearestPoint( PointType const& ThisPoint, PointerType& rResult, CoordinateType& rResultDistance )
{
    SearchStructureType Box;
    Box.Set( CalculateCell(ThisPoint), mN );
    SearchNearestPointLocal(ThisPoint,rResult,rResultDistance,Box);
}

//************************************************************************

/// Variant reusing a caller-provided box; rResult / rResultDistance may
/// already carry a candidate from an enclosing spatial structure.
void SearchNearestPoint( PointType const& ThisPoint, PointerType& rResult, CoordinateType& rResultDistance, SearchStructureType& Box )
{
    // This case is when BinStatic is a LeafType in Other Spacial Structure
    // Then, it is possible a better Result before this search
    Box.Set( CalculateCell(ThisPoint), mN );
    SearchNearestPointLocal( ThisPoint, rResult, rResultDistance, Box );
}

//************************************************************************

/// Core nearest-point loop: starts at the cell containing ThisPoint and
/// grows the search box ring by ring (++Box) until a point is found.
/// The empty-bins guard at the top prevents the loop from running forever.
void SearchNearestPointLocal( PointType const& ThisPoint, PointerType& rResult, CoordinateType& rResultDistance, SearchStructureType& Box )
{
    if( mPointBegin == mPointEnd )
        return;

    bool Found = false;

    // set mBox
    Box.Set( CalculateCell(ThisPoint), mN );

    // initial search
    ++Box;
    SearchNearestInBox( ThisPoint, rResult, rResultDistance, Box, Found );

    // increase mBox and try again
    while(!Found)
    {
        ++Box;
        SearchNearestInBox( ThisPoint, rResult, rResultDistance, Box, Found );
    }
}

//************************************************************************

/// Collects every stored point within Radius of ThisPoint.
/// @return Number of results written (capped at MaxNumberOfResults).
SizeType SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, IteratorType Results,
                         DistanceIteratorType ResultsDistances, SizeType const& MaxNumberOfResults )
{
    CoordinateType Radius2 = Radius * Radius;
    SizeType NumberOfResults = 0;
    SearchStructureType Box( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN );
    SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults, Box );
    return NumberOfResults;
}

//************************************************************************

/// Same search reusing a caller-provided search box.
SizeType SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, IteratorType Results,
                         DistanceIteratorType ResultsDistances, SizeType const& MaxNumberOfResults, SearchStructureType& Box )
{
    CoordinateType Radius2 = Radius * Radius;
    SizeType NumberOfResults = 0;
    Box.Set( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN );
    SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults, Box );
    return NumberOfResults;
}

//************************************************************************

/// Variant taking the precomputed squared radius and accumulating into the
/// caller's NumberOfResults counter.
void SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
                     DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults )
{
    SearchStructureType Box( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN );
    SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults, Box);
}

//************************************************************************

/// Same as above, reusing a caller-provided search box.
void SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
                     DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults, SearchStructureType& Box )
{
    Box.Set( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN );
    SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults, Box);
}
//************************************************************************ // **** THREAD SAFE // Dimension = 1 void SearchInRadiusLocal( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results, DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults, SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box ) { for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block ) SearchRadiusInRange()(mPoints[I].begin(),mPoints[I].end(),ThisPoint,Radius2,Results,ResultsDistances,NumberOfResults,MaxNumberOfResults); } // Dimension = 2 void SearchInRadiusLocal( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results, DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults, SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box ) { for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block ) for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block ) SearchRadiusInRange()(mPoints[I].begin(),mPoints[I].end(),ThisPoint,Radius2,Results,ResultsDistances,NumberOfResults,MaxNumberOfResults); } // Dimension = 3 void SearchInRadiusLocal( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results, DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults, SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box ) { for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block ) for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block ) for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I 
+= Box.Axis[0].Block ) SearchRadiusInRange()(mPoints[I].begin(),mPoints[I].end(),ThisPoint,Radius2,Results,ResultsDistances,NumberOfResults,MaxNumberOfResults); } //************************************************************************ /////////////////////////////////////////////////////////////////////////// // Multiple Input Search /////////////////////////////////////////////////////////////////////////// void MultiSearchInRadiusTest(const SizeType& NumberOfPoints, const SizeType& MaxNumberOfResults, const double& Radius, const SizeType& times) { //MultiSearch Test Timer::Start("ALL"); PointerType PointInput = new PointType[NumberOfPoints]; // std::vector<IteratorType Results>(NumberOfPoints * MaxNumberOfResults); // std::vector<std::vector<PointerType> > Results(NumberOfPoints, std::vector<PointerType>(MaxNumberOfResults)); // std::vector<std::vector<double> > ResultsDistances(NumberOfPoints, std::vector<double>(MaxNumberOfResults,0)); for (ModelPart::ElementsContainerType::iterator el_it = StaticMesh->ElementsBegin(); el_it != StaticMesh->ElementsEnd() && el_it-StaticMesh->ElementsBegin() < NumberOfPoints; el_it++) { PointType temp; Geometry<Node < 3 > >& geom = el_it->GetGeometry(); temp[0] = (geom[0].X() + geom[1].X() + geom[2].X() + geom[3].X())/4; temp[1] = (geom[0].Y() + geom[1].Y() + geom[2].Y() + geom[3].Y())/4; temp[2] = (geom[0].Z() + geom[1].Z() + geom[2].Z() + geom[3].Z())/4; PointInput[el_it-StaticMesh->ElementsBegin()] = PointType(temp); } int rest = 0; #pragma omp parallel for reduction(+:rest) for(size_t i = 0; i < NumberOfPoints; i++) { IteratorType Results = new PointerType[MaxNumberOfResults]; DistanceIteratorType Distances = new double[MaxNumberOfResults]; PointType mypoint = PointInput[i]; // IteratorType rest += SearchInRadius(mypoint, Radius, Results, Distances, MaxNumberOfResults); } std::cout << "Rest: " << rest << std::endl; Timer::Stop("ALL"); } SizeType SearchInRadius( PointerType const& ThisPoints, SizeType const& NumberOfPoints, 
CoordinateType const& Radius, std::vector<std::vector<PointerType> > Results, std::vector<std::vector<double> > ResultsDistances, SizeType const& MaxNumberOfResults ) { CoordinateType Radius2 = Radius * Radius; SizeType NumberOfResults = 0; /*SearchStructureType Box[NumberOfPoints]*/; // for(size_t i = 0; i < NumberOfPoints; i++) // Box[i] = SearchStructureType( CalculateCell(ThisPoints[i],-Radius), CalculateCell(ThisPoints[i],Radius), mN ); // SearchInRadiusLocal( ThisPoints, NumberOfPoints, Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults/*, Box*/ ); return NumberOfResults; } //************************************************************************ SizeType SearchInRadius( PointerType const& ThisPoints, SizeType const& NumberOfPoints, CoordinateType const& Radius, IteratorType Results, DistanceIteratorType ResultsDistances, SizeType const& MaxNumberOfResults, SearchStructureType Box[] ) { CoordinateType Radius2 = Radius * Radius; SizeType NumberOfResults = 0; for(size_t i = 0; i < NumberOfPoints; i++) Box[i].Set( CalculateCell(ThisPoints[i],-Radius), CalculateCell(ThisPoints[i],Radius), mN ); SearchInRadiusLocal( ThisPoints, NumberOfPoints, Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults, Box ); return NumberOfResults; } //************************************************************************ void SearchInRadius( PointerType const& ThisPoints, SizeType const& NumberOfPoints, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results, DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults ) { SearchStructureType Box[NumberOfPoints]; for(size_t i = 0; i < NumberOfPoints; i++) Box[i] = SearchStructureType( CalculateCell(ThisPoints[i],-Radius), CalculateCell(ThisPoints[i],Radius), mN ); SearchInRadiusLocal( ThisPoints, NumberOfPoints, Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults, Box); } 
//************************************************************************ void SearchInRadius( PointerType const& ThisPoints, SizeType const& NumberOfPoints, CoordinateType const& Radius, CoordinateType const& Radius2, std::vector<std::vector<PointerType> >& Results, std::vector<std::vector<double> >& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults /*, SearchStructureType Box[]*/ ) { // for(size_t i = 0; i < NumberOfPoints; i++) // Box[i].Set( CalculateCell(ThisPoints[i],-Radius), CalculateCell(ThisPoints[i],Radius), mN ); SearchInRadiusLocal( ThisPoints, NumberOfPoints, Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults); } //************************************************************************ // Dimension = 1 void SearchInRadiusLocal( PointerType const& ThisPoint, SizeType const& NumberOfPoints, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results, DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults, SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box ) { for(size_t i = 0; i < NumberOfPoints; i++) { SizeType thisNumberOfResults = 0; IteratorType ResulPointer = &Results[NumberOfResults]; DistanceIteratorType ResultsDistancesPointer = &ResultsDistances[NumberOfResults]; for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block ) SearchRadiusInRange()(mPoints[I].begin(),mPoints[I].end(),ThisPoint,Radius2,Results,ResultsDistances,NumberOfResults,MaxNumberOfResults); NumberOfResults += thisNumberOfResults; } } // Dimension = 2 void SearchInRadiusLocal( PointerType const& ThisPoint, SizeType const& NumberOfPoints, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results, DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults, 
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box ) { for(size_t i = 0; i < NumberOfPoints; i++) { SizeType thisNumberOfResults = 0; IteratorType ResulPointer = &Results[NumberOfResults]; DistanceIteratorType ResultsDistancesPointer = &ResultsDistances[NumberOfResults]; for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block ) for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block ) SearchRadiusInRange()(mPoints[I].begin(),mPoints[I].end(),ThisPoint,Radius2,Results,ResultsDistances,NumberOfResults,MaxNumberOfResults); NumberOfResults += thisNumberOfResults; } } // Dimension = 3 void SearchInRadiusLocal( PointerType const& ThisPoint, SizeType const& NumberOfPoints, CoordinateType const& Radius, CoordinateType const& Radius2, std::vector<std::vector<PointerType> > Results, std::vector<std::vector<double> > ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults /*, SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3> * const& Box*/ ) { int ompSafe_NumberOfResults = 0; // #pragma omp parallel for reduction(+:ompSafe_NumberOfResults) for(size_t i = 0; i < NumberOfPoints; i++) { SizeType thisNumberOfResults = 0; IteratorType ResulPointer = &Results[i][0]; DistanceIteratorType ResultsDistancesPointer = &ResultsDistances[i][0]; SearchStructureType Box; Box.Set( CalculateCell(ThisPoint[i],-Radius), CalculateCell(ThisPoint[i],Radius), mN ); for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block ) for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block ) for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block ) { SearchRadiusInRange()(mPoints[I].begin(),mPoints[I].end(),ThisPoint[i],Radius2,ResulPointer,ResultsDistancesPointer,thisNumberOfResults,MaxNumberOfResults); } 
ompSafe_NumberOfResults += thisNumberOfResults; } NumberOfResults = ompSafe_NumberOfResults; } /////////////////////////////////////////////////////////////////////////// // Thread Safe ? /////////////////////////////////////////////////////////////////////////// //************************************************************************ SizeType SearchInRadius( PointType const& ThisPoint, CoordinateType Radius, IteratorType Results, SizeType MaxNumberOfResults ) { CoordinateType Radius2 = Radius * Radius; SizeType NumberOfResults = 0; SearchStructureType Box( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN ); SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, NumberOfResults, MaxNumberOfResults, Box ); return NumberOfResults; } //************************************************************************ SizeType SearchInRadius( PointType const& ThisPoint, CoordinateType Radius, IteratorType Results, SizeType MaxNumberOfResults, SearchStructureType& Box ) { CoordinateType Radius2 = Radius * Radius; SizeType NumberOfResults = 0; Box.Set( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN ); SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, NumberOfResults, MaxNumberOfResults, Box ); return NumberOfResults; } //************************************************************************ void SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults ) { SearchStructureType Box( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN ); SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, NumberOfResults, MaxNumberOfResults, Box ); } //************************************************************************ void SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results, SizeType& NumberOfResults, SizeType 
const& MaxNumberOfResults, SearchStructureType& Box ) { Box.Set( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN ); SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, NumberOfResults, MaxNumberOfResults, Box ); } //************************************************************************ // **** THREAD SAFE // Dimension = 1 void SearchInRadiusLocal( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults, SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box ) { for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I++ ) SearchRadiusInRange()(mPoints[I].begin(),mPoints[I].end(),ThisPoint,Radius2,Results,NumberOfResults,MaxNumberOfResults); } // Dimension = 2 void SearchInRadiusLocal( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults, SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box ) { for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block ) for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I++ ) SearchRadiusInRange()(mPoints[I].begin(),mPoints[I].end(),ThisPoint,Radius2,Results,NumberOfResults,MaxNumberOfResults); } // Dimension = 3 void SearchInRadiusLocal( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults, SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box ) { for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block ) for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block ) for(IndexType I = II + 
Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I++ ) SearchRadiusInRange()(mPoints[I].begin(),mPoints[I].end(),ThisPoint,Radius2,Results,NumberOfResults,MaxNumberOfResults); } //************************************************************************ //************************************************************************ // Dimension = 1 void SearchNearestInBox( PointType const& ThisPoint, PointerType& ResultPoint, CoordinateType& ResultDistance, SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box, bool& Found ) { Found = false; for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block ) SearchNearestInRange()( mPoints[I].begin(), mPoints[I].end(), ThisPoint, ResultPoint, ResultDistance, Found ); } // Dimension = 2 void SearchNearestInBox( PointType const& ThisPoint, PointerType& ResultPoint, CoordinateType& ResultDistance, SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box, bool& Found ) { Found = false; for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block ) for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block ) SearchNearestInRange()( mPoints[I].begin(), mPoints[I].end(), ThisPoint, ResultPoint, ResultDistance, Found ); } // Dimension = 3 void SearchNearestInBox( PointType const& ThisPoint, PointerType& ResultPoint, CoordinateType& ResultDistance, SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box, bool& Found ) { Found = false; for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block ) for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block ) for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block ) SearchNearestInRange()( mPoints[I].begin(), mPoints[I].end(), ThisPoint, ResultPoint, ResultDistance, 
Found ); } //************************************************************************ //************************************************************************ SizeType SearchInBox( PointType const& SearchMinPoint, PointType const& SearchMaxPoint, IteratorType Results, SizeType MaxNumberOfResults ) { SizeType NumberOfResults = 0; SearchStructureType Box( CalculateCell(SearchMinPoint), CalculateCell(SearchMaxPoint), mN ); SearchInBoxLocal( SearchMinPoint, SearchMaxPoint, Results, NumberOfResults, MaxNumberOfResults, Box ); return NumberOfResults; } //************************************************************************ void SearchInBox(PointType const& SearchMinPoint, PointType const& SearchMaxPoint, IteratorType& Results, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults ) { NumberOfResults = 0; SearchStructureType Box( CalculateCell(SearchMinPoint), CalculateCell(SearchMaxPoint), mN ); SearchInBoxLocal( SearchMinPoint, SearchMaxPoint, Results, NumberOfResults, MaxNumberOfResults, Box ); } //************************************************************************ // Dimension = 1 void SearchInBoxLocal( PointType const& SearchMinPoint, PointType const& SearchMaxPoint, IteratorType& ResultsPoint, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults, SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box ) { for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block ) SearchBoxInRange()(SearchMinPoint,SearchMaxPoint,mPoints[I].begin(),mPoints[I].end(),ResultsPoint,NumberOfResults,MaxNumberOfResults); } // Dimension = 2 void SearchInBoxLocal( PointType const& SearchMinPoint, PointType const& SearchMaxPoint, IteratorType& ResultsPoint, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults, SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box ) { for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block 
) for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block ) SearchBoxInRange()(SearchMinPoint,SearchMaxPoint,mPoints[I].begin(),mPoints[I].end(),ResultsPoint,NumberOfResults,MaxNumberOfResults); } // Dimension = 3 void SearchInBoxLocal( PointType const& SearchMinPoint, PointType const& SearchMaxPoint, IteratorType& ResultsPoint, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults, SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box ) { for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block ) for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block ) for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block ) SearchBoxInRange()(SearchMinPoint,SearchMaxPoint,mPoints[I].begin(),mPoints[I].end(),ResultsPoint,NumberOfResults,MaxNumberOfResults); } //************************************************************************ /// Turn back information as a string. virtual std::string Info() const { return "BinsDynamicMpi"; } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const { rOStream << "BinsDynamicMpi"; } /// Print object's data. 
/// Prints the bin contents: total point count, then one "[ p0 p1 ... ]"
/// line per cell. Perfix is prepended to every line.
virtual void PrintData(std::ostream& rOStream, std::string const& Perfix = std::string()) const
{
    rOStream << Perfix << "Bin[" << SearchUtils::PointerDistance(mPointBegin, mPointEnd) << "] : " << std::endl;
    for(typename CellsContainerType::const_iterator i_cell = mPoints.begin() ; i_cell != mPoints.end() ; i_cell++)
    {
        rOStream << Perfix << "[ " ;
        for(typename PointVector::const_iterator i_point = i_cell->begin() ; i_point != i_cell->end() ; i_point++)
            rOStream << **i_point << " "; // double deref: cell stores pointers to points
        rOStream << " ]" << std::endl;
    }
    rOStream << std::endl;
}

/// Print Size of Container (number of cells per axis)
void PrintSize( std::ostream& rout ){
    rout << " BinsSize: ";
    for(SizeType i = 0 ; i < Dimension ; i++)
        rout << "[" << mN[i] << "]";
    rout << std::endl;
}

/// Print Limits Points of the Container (bounding box and cell size)
void PrintBox( std::ostream& rout ){
    rout << " BinsBox: Min [";
    mMinPoint.Print(rout);
    rout << "]; Max [";
    mMaxPoint.Print(rout);
    rout << "]; Size [";
    mCellSize.Print(rout);
    rout << "]" << std::endl;
}

/// Assignment operator.
// NOTE(review): declared but never defined (likewise the copy constructor
// below); copying is effectively disabled -- any use fails at link time.
BinsDynamicMpi& operator=(BinsDynamicMpi const& rOther);

/// Copy constructor.
BinsDynamicMpi(BinsDynamicMpi const& rOther);

private:

    // Iterator range over the stored points (storage not owned by the bins).
    IteratorType mPointBegin;
    IteratorType mPointEnd;

    // Bounding box and cell geometry of the grid.
    Tvector<CoordinateType,Dimension> mMinPoint;
    Tvector<CoordinateType,Dimension> mMaxPoint;
    Tvector<CoordinateType,Dimension> mCellSize;
    Tvector<CoordinateType,Dimension> mInvCellSize;  // 1/mCellSize, cached
    Tvector<SizeType,Dimension> mN;                  // cells per axis
    SizeType mNumPoints;

    ModelPart * StaticMesh;  // mesh queried by MultiSearchInRadiusTest; not owned

    // Bins Access Vector ( vector<Iterator> )
    CellsContainerType mPoints;

    // Work Variables ( For non-copy of Search Variables )
    //BinBox SearchBox;

    //MPI interface
    int mpi_rank;
    int mpi_size;

    //MPI Communication
    vector<int> mpi_connectivity;
    vector<vector<double> > mpi_MinPoints;
    vector<vector<double> > mpi_MaxPoints;

public:

    // Disabled factory kept for reference:
    // static TreeNodeType* Construct(IteratorType PointsBegin, IteratorType PointsEnd, PointType MaxPoint, PointType MinPoint, SizeType BucketSize)
    // {
    //     SizeType number_of_points = SearchUtils::PointerDistance(PointsBegin,PointsEnd);
    //     if (number_of_points == 0)
    //         return NULL;
    //     else
    //     {
    //         return new BinsDynamicMpi( PointsBegin, PointsEnd, MinPoint, MaxPoint, BucketSize );
    //     }
    // }
};

/// Stream insertion: prints info, sizes and full contents of the bins.
template<class TConfigure>
std::ostream & operator<<( std::ostream& rOStream, BinsDynamicMpi<TConfigure>& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintSize(rOStream);
    rThis.PrintData(rOStream);
    return rOStream;
}

}

#endif // KRATOS_BINS_DYNAMIC_MPI_CONTAINER_H_INCLUDED
omp.c
/**********************************************************************
***  NAME        : Bo Cimino                                        ***
***  CLASS       : CSc 318                                          ***
***  DUE DATE    : 04/02/2014                                       ***
***  INSTRUCTOR  : GAMRADT                                          ***
***********************************************************************
***  DESCRIPTION: This is the 5th assignment for Parallel
     Programming. This uses a riemann sum to calculate pi, should the
     user's interval be from 0 to 1; The function is hardcoded in.
     The objective here is to compare different performance obtained
     by adding more threads to do the work. Data parallelism is what
     is implemented here. This assignment differs from 4 because of
     more detailed omp clauses. In addition we use a parallel for
     directive.
**********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <math.h>

const double pi = 3.1415926535897932384626433;

void welcome();
void ReadCommandLine(const int argc, const char* argv[], double* xInit, double* xEnd,
                     unsigned int* numThreads, unsigned long* numParts);
void calcPi(double xInit, double xEnd, unsigned long partitions, unsigned int threads,
            double* glob_result);
double calcFx(double x);
void display(double myPie, double time, unsigned long partitions);

/**********************************************************************
***  FUNCTION main
***********************************************************************
***  Reads the interval, thread count and partition count, times the
***  parallel midpoint integration and prints the result.
**********************************************************************/
int main(const int argc, const char* argv[])
{
    unsigned int threads;
    unsigned long partitions;
    double myPie = 0.0, current_time, ti, tf, xInit, xEnd;

    welcome();
    ReadCommandLine(argc, argv, &xInit, &xEnd, &threads, &partitions);
    system("clear");

    /* Wall-clock the whole parallel computation. */
    ti = omp_get_wtime();
    calcPi(xInit, xEnd, partitions, threads, &myPie);
    tf = omp_get_wtime();
    current_time = tf-ti;

    display(myPie, current_time, partitions);
    return 0;
}

/* Prints a short usage banner. */
void welcome()
{
    printf("This program will, if your range is from 0 to 1, calculate pi.\n");
    printf("If all your arguments were not entered via command line,\nyou will be prompted.\n\n");
}

/**********************************************************************
***  FUNCTION ReadCommandLine
***********************************************************************
***  DESCRIPTION : Reads the interval, thread count and partition
***                count from the command line when exactly four
***                arguments are given; otherwise prompts for them.
***  INPUT ARGS  : argc, argv
***  IN/OUT ARGS : xInit, xEnd, numThreads, numParts
***  RETURN      : void
**********************************************************************/
void ReadCommandLine(const int argc, const char* argv[], double* xInit, double* xEnd,
                     unsigned int* numThreads, unsigned long* numParts)
{
    if(argc != 5)
    {
        printf("Enter starting point     -> ");
        scanf("%lf", xInit);
        printf("Enter ending point       -> ");
        scanf("%lf", xEnd);
        printf("Enter no. of threads     -> ");
        /* Fixed: "%ud" is not a conversion specifier -- it reads an
         * unsigned and then requires a literal 'd' in the input, which
         * normally fails and leaves the character unconsumed. */
        scanf("%u", numThreads);
        printf("Enter no. of partitions  -> ");
        scanf("%lu", numParts);
    }
    else
    {
        *xInit = atof(argv[1]);
        *xEnd = atof(argv[2]);
        *numThreads = atoi(argv[3]);
        *numParts = atol(argv[4]);
    }
}

/**********************************************************************
***  FUNCTION calcPi
***********************************************************************
***  DESCRIPTION : Midpoint Riemann sum of f(x) = 1/(1+x^2) over
***                [xInit, xEnd], scaled by 4 (equals pi on [0,1]).
***                The partition loop is shared among `threads`
***                OpenMP threads and combined with a + reduction.
***  INPUT ARGS  : xInit, xEnd, partitions, threads
***  IN/OUT ARGS : glob_total (receives the final sum)
***  RETURN      : void
**********************************************************************/
void calcPi(double xInit, double xEnd, unsigned long partitions, unsigned int threads,
            double* glob_total)
{
    unsigned long i;
    double local_total, width, half_width, x;

    /* Fixed: the original cast partitions to float, throwing away
     * precision for large partition counts; keep the whole division
     * in double. */
    width = (xEnd-xInit) / (double) partitions;
    half_width = width / 2.0;
    local_total = 0;

    #pragma omp parallel num_threads(threads) default(none) private(i,x) shared(partitions, width, half_width) reduction(+:local_total)
    {
        int thread_num = omp_get_thread_num();
        int num_threads = omp_get_num_threads();

        #pragma omp for
        for(i = 0; i < partitions; i++)
        {
            x = half_width + i * width;   /* midpoint of slice i */
            local_total += calcFx(x);
        }

        /* Each thread scales its own partial before the reduction sums
         * them; valid because multiplication distributes over +. */
        local_total = local_total * width * 4.0;

        #pragma omp critical
        {
            printf("Thread %d of %d total: % 25.15f\n", thread_num, num_threads, local_total);
        }
    }

    *glob_total = (local_total);
}

/**********************************************************************
***  FUNCTION calcFx
***********************************************************************
***  DESCRIPTION : Returns the value of f(x) at x: f(x) = 1/(1+x^2)
***  INPUT ARGS  : x
***  RETURN      : double
**********************************************************************/
double calcFx(double x)
{
    return 1/(1+ x*x);
}

/**********************************************************************
***  FUNCTION display
***********************************************************************
***  DESCRIPTION : Displays the computed value next to the reference
***                constant, their difference, and the elapsed time.
***  INPUT ARGS  : myPie, time, partitions
***  RETURN      : void
**********************************************************************/
void display(double myPie, double time, unsigned long partitions)
{
    double diff = myPie-pi;
    printf("Partitions        : %16.1lu\n\n", partitions);
    printf("Real Pi           : %26.15f\n", pi);
    printf("Calculated Pi     : %26.15f\n", myPie);
    printf("Difference        : %26.15f\n\n", fabs(diff));
    printf("Time to calculate : %26.15f\n\n", time);
}
Pattern.h
/*****************************************************************************
*
* Copyright (c) 2003-2018 by The University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Apache License, version 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014 by Centre for Geoscience Computing (GeoComp)
*
*****************************************************************************/

/****************************************************************************/
/*   Paso: CSC/CSR sparse matrix pattern                                    */
/****************************************************************************/
/*   Author: Lutz Gross, l.gross@uq.edu.au                                  */
/****************************************************************************/

#ifndef __PASO_PATTERN_H__
#define __PASO_PATTERN_H__

#include "Paso.h"
#include <escript/IndexList.h>

namespace paso {

struct Pattern;
typedef boost::shared_ptr<Pattern> Pattern_ptr;
typedef boost::shared_ptr<const Pattern> const_Pattern_ptr;

/// Sparsity pattern of a CSC/CSR matrix: the ptr/index pair describes which
/// entries are non-zero; no values are stored.
struct Pattern : boost::enable_shared_from_this<Pattern>
{
    /// Takes ownership semantics of ptr/index as used elsewhere in Paso --
    /// NOTE(review): ownership is not visible from this header; confirm
    /// against the implementation before freeing the arrays yourself.
    Pattern(int type, dim_t numOutput, dim_t numInput, index_t* ptr,
            index_t* index);

    ~Pattern();

    /// Expands each pattern entry into an outputBlockSize x inputBlockSize
    /// block pattern of the given type.
    Pattern_ptr unrollBlocks(int newType, dim_t outputBlockSize,
                             dim_t inputBlockSize);

    /// Extracts the sub-pattern selected by the given row list / column map.
    Pattern_ptr getSubpattern(dim_t newNumRows, dim_t newNumCols,
                              const index_t* rowList,
                              const index_t* newColIndex) const;

    /// Searches for a maximal independent set MIS in the matrix pattern
    void mis(index_t* mis_marker) const;

    /// Computes a bandwidth-reducing relabelling into oldToNew.
    void reduceBandwidth(index_t* oldToNew);

    /// Pattern of the product of this pattern with another.
    Pattern_ptr multiply(int type, const_Pattern_ptr other) const;

    /// Pattern of the element-wise combination with another pattern.
    Pattern_ptr binop(int type, const_Pattern_ptr other) const;

    /// Lazily computed pointer to the main-diagonal entries (borrowed --
    /// do not free).
    index_t* borrowMainDiagonalPointer();

    /// Builds a pattern from an array of escript IndexLists, keeping only
    /// indices in [range_min, range_max) shifted by index_offset.
    static Pattern_ptr fromIndexListArray(dim_t n0, dim_t n,
                            const escript::IndexList* index_list_array,
                            index_t range_min, index_t range_max,
                            index_t index_offset);

    /// Lazily computed coloring array (borrowed -- do not free); also
    /// defines numColors as a side effect.
    index_t* borrowColoringPointer();

    dim_t getBandwidth(index_t* label) const;

    /// True when neither ptr nor index has been set.
    inline bool isEmpty() const
    {
        return (!ptr && !index);
    }

    inline dim_t getNumColors()
    {
        // make sure numColors is defined
        borrowColoringPointer();
        return numColors;
    }

    /// Maximum number of non-zeros in any single row/column, computed with
    /// an OpenMP per-thread max that is merged in a critical section.
    /// NOTE(review): iterates i < numInput although ptr is documented below
    /// as having one entry per *output* row -- verify against the
    /// implementation (it may rely on CSC/CSR symmetry of the layout).
    inline dim_t maxDeg() const
    {
        dim_t deg = 0;

#pragma omp parallel
        {
            dim_t loc_deg=0;
#pragma omp for
            for (dim_t i = 0; i < numInput; ++i) {
                loc_deg=std::max(loc_deg, ptr[i+1]-ptr[i]);
            }
#pragma omp critical
            {
                deg = std::max(deg, loc_deg);
            }
        }
        return deg;
    }

    int type;
    // Number of rows in the ptr array [CSR] / number of cols for CSC
    dim_t numOutput;
    // Number of cols [CSR]
    dim_t numInput;
    // number of non-zeros
    dim_t len;
    // ptr[n] to ptr[n+1] lists indices (in index) of non-zeros in row n
    index_t* ptr;
    // Non-major indices of non-zeros (in CSR this will be col numbers)
    index_t* index;
    // pointer to main diagonal entry
    index_t* main_iptr;
    // number of colors
    dim_t numColors;
    // coloring index: inputs with the same color are not connected
    index_t* coloring;
};

} // namespace paso

#endif // __PASO_PATTERN_H__
_phono3py.c
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/

#include <Python.h>
#include <assert.h>
#include <math.h>
#include <numpy/arrayobject.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#include "lapack_wrapper.h"
#include "phono3py.h"
#include "phonoc_array.h"

/* Forward declarations of the Python-callable wrappers implemented later
 * in this file and registered in the _phono3py method table. */

/* Ph-ph interaction and collision integrals. */
static PyObject *py_get_interaction(PyObject *self, PyObject *args);
static PyObject *py_get_pp_collision(PyObject *self, PyObject *args);
static PyObject *py_get_pp_collision_with_sigma(PyObject *self,
                                                PyObject *args);

/* Self-energy (imaginary and real parts). */
static PyObject *py_get_imag_self_energy_with_g(PyObject *self,
                                                PyObject *args);
static PyObject *py_get_detailed_imag_self_energy_with_g(PyObject *self,
                                                         PyObject *args);
static PyObject *py_get_real_self_energy_at_bands(PyObject *self,
                                                  PyObject *args);
static PyObject *py_get_real_self_energy_at_frequency_point(PyObject *self,
                                                            PyObject *args);

/* Collision-matrix construction and manipulation. */
static PyObject *py_get_collision_matrix(PyObject *self, PyObject *args);
static PyObject *py_get_reducible_collision_matrix(PyObject *self,
                                                   PyObject *args);
static PyObject *py_symmetrize_collision_matrix(PyObject *self,
                                                PyObject *args);
static PyObject *py_expand_collision_matrix(PyObject *self, PyObject *args);

/* Force-constant (fc2/fc3) utilities. */
static PyObject *py_distribute_fc3(PyObject *self, PyObject *args);
static PyObject *py_rotate_delta_fc2s(PyObject *self, PyObject *args);

/* Isotope scattering. */
static PyObject *py_get_isotope_strength(PyObject *self, PyObject *args);
static PyObject *py_get_thm_isotope_strength(PyObject *self, PyObject *args);

/* Permutation symmetry of fc3. */
static PyObject *py_get_permutation_symmetry_fc3(PyObject *self,
                                                 PyObject *args);
static PyObject *py_get_permutation_symmetry_compact_fc3(PyObject *self,
                                                         PyObject *args);
static PyObject *py_transpose_compact_fc3(PyObject *self, PyObject *args);

/* Grid / tetrahedron-method helpers. */
static PyObject *py_get_neighboring_grid_points(PyObject *self,
                                                PyObject *args);
static PyObject *py_get_thm_integration_weights_at_grid_points(PyObject *self,
                                                               PyObject *args);

/* Triplet generation. */
static PyObject *py_tpl_get_triplets_reciprocal_mesh_at_q(PyObject *self,
                                                          PyObject *args);
static PyObject *py_tpl_get_BZ_triplets_at_q(PyObject *self, PyObject *args);
/* Forward declarations for the remaining Python-callable wrapper
   functions defined later in this file. */
static PyObject *py_get_triplets_integration_weights(PyObject *self,
                                                     PyObject *args);
static PyObject *py_get_triplets_integration_weights_with_sigma(
    PyObject *self, PyObject *args);
static PyObject *py_get_grid_index_from_address(PyObject *self,
                                                PyObject *args);
static PyObject *py_get_gr_grid_addresses(PyObject *self, PyObject *args);
static PyObject *py_get_reciprocal_rotations(PyObject *self, PyObject *args);
static PyObject *py_transform_rotations(PyObject *self, PyObject *args);
static PyObject *py_get_snf3x3(PyObject *self, PyObject *args);
static PyObject *py_get_ir_grid_map(PyObject *self, PyObject *args);
static PyObject *py_get_bz_grid_addresses(PyObject *self, PyObject *args);
static PyObject *py_rotate_bz_grid_addresses(PyObject *self, PyObject *args);
static PyObject *py_diagonalize_collision_matrix(PyObject *self,
                                                 PyObject *args);
static PyObject *py_pinv_from_eigensolution(PyObject *self, PyObject *args);
static PyObject *py_get_default_colmat_solver(PyObject *self, PyObject *args);

/* File-local helpers (not exposed to Python). */
static void pinv_from_eigensolution(double *data, const double *eigvals,
                                    const long size, const double cutoff,
                                    const long pinv_method);
static void show_colmat_info(const PyArrayObject *collision_matrix_py,
                             const long i_sigma, const long i_temp,
                             const long adrs_shift);
/* Wrap a NumPy array into the project's long/double array structs
   (definitions not visible here; presumably thin views over the NumPy
   buffer — see convert_to_*array later in the file). */
static Larray *convert_to_larray(const PyArrayObject *npyary);
static Darray *convert_to_darray(const PyArrayObject *npyary);

/* Per-module state: holds the module's exception object. */
struct module_state {
    PyObject *error;
};

#if PY_MAJOR_VERSION >= 3
/* Python 3: state lives inside the module object. */
#define GETSTATE(m) ((struct module_state *)PyModule_GetState(m))
#else
/* Python 2: a single static module state. */
#define GETSTATE(m) (&_state)
static struct module_state _state;
#endif

/* Raise the module's exception; usable from Python to test error paths. */
static PyObject *error_out(PyObject *m) {
    struct module_state *st = GETSTATE(m);
    PyErr_SetString(st->error, "something bad happened");
    return NULL;
}

/* Method table mapping Python-visible names to the C wrappers.
   (Table continues below.) */
static PyMethodDef _phono3py_methods[] = {
    {"error_out", (PyCFunction)error_out, METH_NOARGS, NULL},
    {"interaction", (PyCFunction)py_get_interaction, METH_VARARGS,
     "Interaction of triplets"},
    {"pp_collision",
     (PyCFunction)py_get_pp_collision, METH_VARARGS,
     "Collision and ph-ph calculation"},
    {"pp_collision_with_sigma", (PyCFunction)py_get_pp_collision_with_sigma,
     METH_VARARGS, "Collision and ph-ph calculation for smearing method"},
    {"imag_self_energy_with_g", (PyCFunction)py_get_imag_self_energy_with_g,
     METH_VARARGS, "Imaginary part of self energy at frequency points with g"},
    {"detailed_imag_self_energy_with_g",
     (PyCFunction)py_get_detailed_imag_self_energy_with_g, METH_VARARGS,
     "Detailed contribution to imaginary part of self energy at frequency "
     "points with g"},
    {"real_self_energy_at_bands",
     (PyCFunction)py_get_real_self_energy_at_bands, METH_VARARGS,
     "Real part of self energy from third order force constants"},
    {"real_self_energy_at_frequency_point",
     (PyCFunction)py_get_real_self_energy_at_frequency_point, METH_VARARGS,
     "Real part of self energy from third order force constants at a frequency "
     "point"},
    {"collision_matrix", (PyCFunction)py_get_collision_matrix, METH_VARARGS,
     "Collision matrix with g"},
    {"reducible_collision_matrix",
     (PyCFunction)py_get_reducible_collision_matrix, METH_VARARGS,
     "Collision matrix with g for reducible grid points"},
    {"symmetrize_collision_matrix",
     (PyCFunction)py_symmetrize_collision_matrix, METH_VARARGS,
     "Symmetrize collision matrix"},
    {"expand_collision_matrix", (PyCFunction)py_expand_collision_matrix,
     METH_VARARGS, "Expand collision matrix"},
    {"distribute_fc3", (PyCFunction)py_distribute_fc3, METH_VARARGS,
     "Distribute least fc3 to full fc3"},
    {"rotate_delta_fc2s", (PyCFunction)py_rotate_delta_fc2s, METH_VARARGS,
     "Rotate delta fc2s"},
    {"isotope_strength", (PyCFunction)py_get_isotope_strength, METH_VARARGS,
     "Isotope scattering strength"},
    {"thm_isotope_strength", (PyCFunction)py_get_thm_isotope_strength,
     METH_VARARGS, "Isotope scattering strength for tetrahedron_method"},
    {"permutation_symmetry_fc3",
     (PyCFunction)py_get_permutation_symmetry_fc3, METH_VARARGS,
     "Set permutation symmetry for fc3"},
    {"permutation_symmetry_compact_fc3",
     (PyCFunction)py_get_permutation_symmetry_compact_fc3, METH_VARARGS,
     "Set permutation symmetry for compact-fc3"},
    {"transpose_compact_fc3", (PyCFunction)py_transpose_compact_fc3,
     METH_VARARGS, "Transpose compact fc3"},
    {"neighboring_grid_points", (PyCFunction)py_get_neighboring_grid_points,
     METH_VARARGS, "Neighboring grid points by relative grid addresses"},
    {"integration_weights_at_grid_points",
     (PyCFunction)py_get_thm_integration_weights_at_grid_points, METH_VARARGS,
     "Integration weights of tetrahedron method at grid points"},
    {"triplets_reciprocal_mesh_at_q",
     (PyCFunction)py_tpl_get_triplets_reciprocal_mesh_at_q, METH_VARARGS,
     "Triplets on reciprocal mesh points at a specific q-point"},
    {"BZ_triplets_at_q", (PyCFunction)py_tpl_get_BZ_triplets_at_q,
     METH_VARARGS,
     "Triplets in reciprocal primitive lattice are transformed to those in "
     "BZ."},
    {"triplets_integration_weights",
     (PyCFunction)py_get_triplets_integration_weights, METH_VARARGS,
     "Integration weights of tetrahedron method for triplets"},
    {"triplets_integration_weights_with_sigma",
     (PyCFunction)py_get_triplets_integration_weights_with_sigma,
     METH_VARARGS, "Integration weights of smearing method for triplets"},
    {"grid_index_from_address", (PyCFunction)py_get_grid_index_from_address,
     METH_VARARGS, "Grid index from grid address"},
    {"ir_grid_map", (PyCFunction)py_get_ir_grid_map, METH_VARARGS,
     "Reciprocal mesh points with ir grid mapping table"},
    {"gr_grid_addresses", (PyCFunction)py_get_gr_grid_addresses, METH_VARARGS,
     "Get generalized regular grid addresses"},
    {"reciprocal_rotations", (PyCFunction)py_get_reciprocal_rotations,
     METH_VARARGS, "Return rotation matrices in reciprocal space"},
    {"transform_rotations", (PyCFunction)py_transform_rotations, METH_VARARGS,
     "Transform rotations to those in generalized regular grid"},
    {"snf3x3", (PyCFunction)py_get_snf3x3, METH_VARARGS,
     "Get Smith formal form for 3x3 integer matrix"},
    {"bz_grid_addresses", (PyCFunction)py_get_bz_grid_addresses, METH_VARARGS,
     "Get grid addresses including Brillouin zone surface"},
    {"rotate_bz_grid_index", (PyCFunction)py_rotate_bz_grid_addresses,
     METH_VARARGS, "Rotate grid point considering Brillouin zone surface"},
    {"diagonalize_collision_matrix",
     (PyCFunction)py_diagonalize_collision_matrix, METH_VARARGS,
     "Diagonalize and optionally pseudo-inverse using Lapack dsyev(d)"},
    {"pinv_from_eigensolution", (PyCFunction)py_pinv_from_eigensolution,
     METH_VARARGS, "Pseudo-inverse from eigensolution"},
    {"default_colmat_solver", (PyCFunction)py_get_default_colmat_solver,
     METH_VARARGS, "Return default collison matrix solver by integer value"},
    {NULL, NULL, 0, NULL}};

#if PY_MAJOR_VERSION >= 3
/* GC traverse/clear hooks so the module's exception object participates
   in cyclic garbage collection (Python 3 module state protocol). */
static int _phono3py_traverse(PyObject *m, visitproc visit, void *arg) {
    Py_VISIT(GETSTATE(m)->error);
    return 0;
}

static int _phono3py_clear(PyObject *m) {
    Py_CLEAR(GETSTATE(m)->error);
    return 0;
}

static struct PyModuleDef moduledef = {
    PyModuleDef_HEAD_INIT, "_phono3py", NULL,
    sizeof(struct module_state), _phono3py_methods, NULL,
    _phono3py_traverse, _phono3py_clear, NULL};

#define INITERROR return NULL

PyObject *PyInit__phono3py(void)
#else
#define INITERROR return

void init_phono3py(void)
#endif
/* Module initialization: create the module, install the method table,
   and register the "_phono3py.Error" exception in the module state. */
{
#if PY_MAJOR_VERSION >= 3
    PyObject *module = PyModule_Create(&moduledef);
#else
    PyObject *module = Py_InitModule("_phono3py", _phono3py_methods);
#endif
    struct module_state *st;
    if (module == NULL) INITERROR;
    st = GETSTATE(module);

    st->error = PyErr_NewException("_phono3py.Error", NULL, NULL);
    if (st->error == NULL) {
        Py_DECREF(module);
        INITERROR;
    }

#if PY_MAJOR_VERSION >= 3
    return module;
#endif
}

/* Python wrapper: unpack NumPy arrays and forward to
   ph3py_get_interaction() ("Interaction of triplets" in the method
   table).  Results are written into py_fc3_normal_squared in place. */
static PyObject *py_get_interaction(PyObject *self, PyObject *args) {
    PyArrayObject *py_fc3_normal_squared;
    PyArrayObject *py_g_zero;
    PyArrayObject *py_frequencies;
    PyArrayObject *py_eigenvectors;
    PyArrayObject *py_triplets;
    PyArrayObject *py_bz_grid_addresses;
    PyArrayObject *py_D_diag;
    PyArrayObject *py_Q;
    PyArrayObject *py_svecs;
    PyArrayObject *py_multi;
    PyArrayObject *py_fc3;
    PyArrayObject *py_masses;
    PyArrayObject
        *py_p2s_map;
    PyArrayObject *py_s2p_map;
    PyArrayObject *py_band_indices;
    double cutoff_frequency;
    long symmetrize_fc3_q;

    Darray *fc3_normal_squared;
    Darray *freqs;
    lapack_complex_double *eigvecs;
    long(*triplets)[3];
    long num_triplets;
    char *g_zero;
    long(*bz_grid_addresses)[3];
    long *D_diag;
    long(*Q)[3];
    double *fc3;
    double(*svecs)[3];
    long(*multi)[2];
    double *masses;
    long *p2s;
    long *s2p;
    long *band_indices;
    long multi_dims[2];
    long i;
    long is_compact_fc3;

    if (!PyArg_ParseTuple(args, "OOOOOOOOOOOOOOOld", &py_fc3_normal_squared,
                          &py_g_zero, &py_frequencies, &py_eigenvectors,
                          &py_triplets, &py_bz_grid_addresses, &py_D_diag,
                          &py_Q, &py_fc3, &py_svecs, &py_multi, &py_masses,
                          &py_p2s_map, &py_s2p_map, &py_band_indices,
                          &symmetrize_fc3_q, &cutoff_frequency)) {
        return NULL;
    }

    fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
    freqs = convert_to_darray(py_frequencies);
    /* npy_cdouble and lapack_complex_double may not be compatible. */
    /* So eigenvectors should not be used in Python side */
    eigvecs = (lapack_complex_double *)PyArray_DATA(py_eigenvectors);
    triplets = (long(*)[3])PyArray_DATA(py_triplets);
    num_triplets = (long)PyArray_DIMS(py_triplets)[0];
    g_zero = (char *)PyArray_DATA(py_g_zero);
    bz_grid_addresses = (long(*)[3])PyArray_DATA(py_bz_grid_addresses);
    D_diag = (long *)PyArray_DATA(py_D_diag);
    Q = (long(*)[3])PyArray_DATA(py_Q);
    fc3 = (double *)PyArray_DATA(py_fc3);
    /* Compact fc3 is detected by a non-square leading shape: a full fc3
       has dims[0] == dims[1] (natom x natom ...). */
    if (PyArray_DIMS(py_fc3)[0] == PyArray_DIMS(py_fc3)[1]) {
        is_compact_fc3 = 0;
    } else {
        is_compact_fc3 = 1;
    }
    svecs = (double(*)[3])PyArray_DATA(py_svecs);
    for (i = 0; i < 2; i++) {
        multi_dims[i] = PyArray_DIMS(py_multi)[i];
    }
    multi = (long(*)[2])PyArray_DATA(py_multi);
    masses = (double *)PyArray_DATA(py_masses);
    p2s = (long *)PyArray_DATA(py_p2s_map);
    s2p = (long *)PyArray_DATA(py_s2p_map);
    band_indices = (long *)PyArray_DATA(py_band_indices);

    ph3py_get_interaction(fc3_normal_squared, g_zero, freqs, eigvecs,
                          triplets, num_triplets, bz_grid_addresses, D_diag,
                          Q, fc3, is_compact_fc3, svecs,
                          multi_dims, multi, masses, p2s, s2p, band_indices,
                          symmetrize_fc3_q, cutoff_frequency);

    /* Free only the Darray wrapper structs; the underlying buffers are
       owned by the NumPy arrays (presumably — convert_to_darray is
       defined elsewhere in this file). */
    free(fc3_normal_squared);
    fc3_normal_squared = NULL;
    free(freqs);
    freqs = NULL;

    Py_RETURN_NONE;
}

/* Python wrapper: tetrahedron-method ph-ph collision ("pp_collision"
   in the method table); forwards to ph3py_get_pp_collision() and
   writes into py_gamma in place. */
static PyObject *py_get_pp_collision(PyObject *self, PyObject *args) {
    PyArrayObject *py_gamma;
    PyArrayObject *py_relative_grid_address;
    PyArrayObject *py_frequencies;
    PyArrayObject *py_eigenvectors;
    PyArrayObject *py_triplets;
    PyArrayObject *py_triplet_weights;
    PyArrayObject *py_bz_grid_addresses;
    PyArrayObject *py_bz_map;
    PyArrayObject *py_D_diag;
    PyArrayObject *py_Q;
    PyArrayObject *py_fc3;
    PyArrayObject *py_svecs;
    PyArrayObject *py_multi;
    PyArrayObject *py_masses;
    PyArrayObject *py_p2s_map;
    PyArrayObject *py_s2p_map;
    PyArrayObject *py_band_indices;
    PyArrayObject *py_temperatures;
    double cutoff_frequency;
    long is_NU;
    long symmetrize_fc3_q;
    long bz_grid_type;

    double *gamma;
    long(*relative_grid_address)[4][3];
    double *frequencies;
    lapack_complex_double *eigenvectors;
    long(*triplets)[3];
    long num_triplets;
    long *triplet_weights;
    long(*bz_grid_addresses)[3];
    long *bz_map;
    long *D_diag;
    long(*Q)[3];
    double *fc3;
    double(*svecs)[3];
    long(*multi)[2];
    double *masses;
    long *p2s;
    long *s2p;
    Larray *band_indices;
    Darray *temperatures;
    long multi_dims[2];
    long i;
    long is_compact_fc3;

    if (!PyArg_ParseTuple(
            args, "OOOOOOOOlOOOOOOOOOOlld", &py_gamma,
            &py_relative_grid_address, &py_frequencies, &py_eigenvectors,
            &py_triplets, &py_triplet_weights, &py_bz_grid_addresses,
            &py_bz_map, &bz_grid_type, &py_D_diag, &py_Q, &py_fc3, &py_svecs,
            &py_multi, &py_masses, &py_p2s_map, &py_s2p_map,
            &py_band_indices, &py_temperatures, &is_NU, &symmetrize_fc3_q,
            &cutoff_frequency)) {
        return NULL;
    }

    gamma = (double *)PyArray_DATA(py_gamma);
    relative_grid_address =
        (long(*)[4][3])PyArray_DATA(py_relative_grid_address);
    frequencies = (double *)PyArray_DATA(py_frequencies);
    eigenvectors = (lapack_complex_double *)PyArray_DATA(py_eigenvectors);
    triplets = (long(*)[3])PyArray_DATA(py_triplets);
    num_triplets =
        (long)PyArray_DIMS(py_triplets)[0];
    triplet_weights = (long *)PyArray_DATA(py_triplet_weights);
    bz_grid_addresses = (long(*)[3])PyArray_DATA(py_bz_grid_addresses);
    bz_map = (long *)PyArray_DATA(py_bz_map);
    D_diag = (long *)PyArray_DATA(py_D_diag);
    Q = (long(*)[3])PyArray_DATA(py_Q);
    fc3 = (double *)PyArray_DATA(py_fc3);
    /* Non-square leading shape marks a compact fc3 (same test as in
       py_get_interaction). */
    if (PyArray_DIMS(py_fc3)[0] == PyArray_DIMS(py_fc3)[1]) {
        is_compact_fc3 = 0;
    } else {
        is_compact_fc3 = 1;
    }
    svecs = (double(*)[3])PyArray_DATA(py_svecs);
    for (i = 0; i < 2; i++) {
        multi_dims[i] = PyArray_DIMS(py_multi)[i];
    }
    multi = (long(*)[2])PyArray_DATA(py_multi);
    masses = (double *)PyArray_DATA(py_masses);
    p2s = (long *)PyArray_DATA(py_p2s_map);
    s2p = (long *)PyArray_DATA(py_s2p_map);
    band_indices = convert_to_larray(py_band_indices);
    temperatures = convert_to_darray(py_temperatures);

    ph3py_get_pp_collision(
        gamma, relative_grid_address, frequencies, eigenvectors, triplets,
        num_triplets, triplet_weights, bz_grid_addresses, bz_map,
        bz_grid_type, D_diag, Q, fc3, is_compact_fc3, svecs, multi_dims,
        multi, masses, p2s, s2p, band_indices, temperatures, is_NU,
        symmetrize_fc3_q, cutoff_frequency);

    /* Free wrapper structs only; buffers belong to the NumPy arrays. */
    free(band_indices);
    band_indices = NULL;
    free(temperatures);
    temperatures = NULL;

    Py_RETURN_NONE;
}

/* Python wrapper: smearing-method variant of pp_collision
   ("pp_collision_with_sigma"); forwards to
   ph3py_get_pp_collision_with_sigma() and writes into py_gamma. */
static PyObject *py_get_pp_collision_with_sigma(PyObject *self,
                                                PyObject *args) {
    PyArrayObject *py_gamma;
    PyArrayObject *py_frequencies;
    PyArrayObject *py_eigenvectors;
    PyArrayObject *py_triplets;
    PyArrayObject *py_triplet_weights;
    PyArrayObject *py_bz_grid_addresses;
    PyArrayObject *py_D_diag;
    PyArrayObject *py_Q;
    PyArrayObject *py_fc3;
    PyArrayObject *py_svecs;
    PyArrayObject *py_multi;
    PyArrayObject *py_masses;
    PyArrayObject *py_p2s_map;
    PyArrayObject *py_s2p_map;
    PyArrayObject *py_band_indices;
    PyArrayObject *py_temperatures;
    long is_NU;
    long symmetrize_fc3_q;
    double sigma;
    double sigma_cutoff;
    double cutoff_frequency;

    double *gamma;
    double *frequencies;
    lapack_complex_double *eigenvectors;
    long(*triplets)[3];
    long num_triplets;
    long *triplet_weights;
    long(*bz_grid_addresses)[3];
    long *D_diag;
    long(*Q)[3];
    double *fc3;
    double(*svecs)[3];
    long(*multi)[2];
    double *masses;
    long *p2s;
    long *s2p;
    Larray *band_indices;
    Darray *temperatures;
    long multi_dims[2];
    long i;
    long is_compact_fc3;

    if (!PyArg_ParseTuple(args, "OddOOOOOOOOOOOOOOOlld", &py_gamma, &sigma,
                          &sigma_cutoff, &py_frequencies, &py_eigenvectors,
                          &py_triplets, &py_triplet_weights,
                          &py_bz_grid_addresses, &py_D_diag, &py_Q, &py_fc3,
                          &py_svecs, &py_multi, &py_masses, &py_p2s_map,
                          &py_s2p_map, &py_band_indices, &py_temperatures,
                          &is_NU, &symmetrize_fc3_q, &cutoff_frequency)) {
        return NULL;
    }

    gamma = (double *)PyArray_DATA(py_gamma);
    frequencies = (double *)PyArray_DATA(py_frequencies);
    eigenvectors = (lapack_complex_double *)PyArray_DATA(py_eigenvectors);
    triplets = (long(*)[3])PyArray_DATA(py_triplets);
    num_triplets = (long)PyArray_DIMS(py_triplets)[0];
    triplet_weights = (long *)PyArray_DATA(py_triplet_weights);
    bz_grid_addresses = (long(*)[3])PyArray_DATA(py_bz_grid_addresses);
    D_diag = (long *)PyArray_DATA(py_D_diag);
    Q = (long(*)[3])PyArray_DATA(py_Q);
    fc3 = (double *)PyArray_DATA(py_fc3);
    /* Compact-fc3 detection, as above. */
    if (PyArray_DIMS(py_fc3)[0] == PyArray_DIMS(py_fc3)[1]) {
        is_compact_fc3 = 0;
    } else {
        is_compact_fc3 = 1;
    }
    svecs = (double(*)[3])PyArray_DATA(py_svecs);
    for (i = 0; i < 2; i++) {
        multi_dims[i] = PyArray_DIMS(py_multi)[i];
    }
    multi = (long(*)[2])PyArray_DATA(py_multi);
    masses = (double *)PyArray_DATA(py_masses);
    p2s = (long *)PyArray_DATA(py_p2s_map);
    s2p = (long *)PyArray_DATA(py_s2p_map);
    band_indices = convert_to_larray(py_band_indices);
    temperatures = convert_to_darray(py_temperatures);

    ph3py_get_pp_collision_with_sigma(
        gamma, sigma, sigma_cutoff, frequencies, eigenvectors, triplets,
        num_triplets, triplet_weights, bz_grid_addresses, D_diag, Q, fc3,
        is_compact_fc3, svecs, multi_dims, multi, masses, p2s, s2p,
        band_indices, temperatures, is_NU, symmetrize_fc3_q,
        cutoff_frequency);

    /* Free wrapper structs only; buffers belong to the NumPy arrays. */
    free(band_indices);
    band_indices = NULL;
    free(temperatures);
    temperatures = NULL;
    Py_RETURN_NONE;
}

/* Python wrapper: imaginary part of the self energy with precomputed
   integration weights g ("imag_self_energy_with_g"); forwards to
   ph3py_get_imag_self_energy_at_bands_with_g(), writing py_gamma. */
static PyObject *py_get_imag_self_energy_with_g(PyObject *self,
                                                PyObject *args) {
    PyArrayObject *py_gamma;
    PyArrayObject *py_fc3_normal_squared;
    PyArrayObject *py_frequencies;
    PyArrayObject *py_triplets;
    PyArrayObject *py_triplet_weights;
    PyArrayObject *py_g;
    PyArrayObject *py_g_zero;
    double cutoff_frequency, temperature;
    long frequency_point_index;

    Darray *fc3_normal_squared;
    double *gamma;
    double *g;
    char *g_zero;
    double *frequencies;
    long(*triplets)[3];
    long *triplet_weights;
    long num_frequency_points;

    if (!PyArg_ParseTuple(args, "OOOOOdOOdl", &py_gamma,
                          &py_fc3_normal_squared, &py_triplets,
                          &py_triplet_weights, &py_frequencies, &temperature,
                          &py_g, &py_g_zero, &cutoff_frequency,
                          &frequency_point_index)) {
        return NULL;
    }

    fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
    gamma = (double *)PyArray_DATA(py_gamma);
    g = (double *)PyArray_DATA(py_g);
    g_zero = (char *)PyArray_DATA(py_g_zero);
    frequencies = (double *)PyArray_DATA(py_frequencies);
    triplets = (long(*)[3])PyArray_DATA(py_triplets);
    triplet_weights = (long *)PyArray_DATA(py_triplet_weights);
    /* Axis 2 of g carries the frequency points. */
    num_frequency_points = (long)PyArray_DIMS(py_g)[2];

    ph3py_get_imag_self_energy_at_bands_with_g(
        gamma, fc3_normal_squared, frequencies, triplets, triplet_weights, g,
        g_zero, temperature, cutoff_frequency, num_frequency_points,
        frequency_point_index);

    /* Free the wrapper struct only (data owned by NumPy). */
    free(fc3_normal_squared);
    fc3_normal_squared = NULL;

    Py_RETURN_NONE;
}

/* Python wrapper: per-triplet ("detailed") contributions to the
   imaginary self energy, split into N and U parts; forwards to
   ph3py_get_detailed_imag_self_energy_at_bands_with_g(). */
static PyObject *py_get_detailed_imag_self_energy_with_g(PyObject *self,
                                                         PyObject *args) {
    PyArrayObject *py_gamma_detail;
    PyArrayObject *py_gamma_N;
    PyArrayObject *py_gamma_U;
    PyArrayObject *py_fc3_normal_squared;
    PyArrayObject *py_frequencies;
    PyArrayObject *py_triplets;
    PyArrayObject *py_triplet_weights;
    PyArrayObject *py_bz_grid_addresses;
    PyArrayObject *py_g;
    PyArrayObject *py_g_zero;
    double cutoff_frequency, temperature;

    Darray *fc3_normal_squared;
    double *gamma_detail;
    double *gamma_N;
    double *gamma_U;
    double *g;
    char *g_zero;
    double *frequencies;
    long(*triplets)[3];
    long *triplet_weights;
    long(*bz_grid_addresses)[3];

    if (!PyArg_ParseTuple(args, "OOOOOOOOdOOd", &py_gamma_detail,
                          &py_gamma_N, &py_gamma_U, &py_fc3_normal_squared,
                          &py_triplets, &py_triplet_weights,
                          &py_bz_grid_addresses, &py_frequencies,
                          &temperature, &py_g, &py_g_zero,
                          &cutoff_frequency)) {
        return NULL;
    }

    fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
    gamma_detail = (double *)PyArray_DATA(py_gamma_detail);
    gamma_N = (double *)PyArray_DATA(py_gamma_N);
    gamma_U = (double *)PyArray_DATA(py_gamma_U);
    g = (double *)PyArray_DATA(py_g);
    g_zero = (char *)PyArray_DATA(py_g_zero);
    frequencies = (double *)PyArray_DATA(py_frequencies);
    triplets = (long(*)[3])PyArray_DATA(py_triplets);
    triplet_weights = (long *)PyArray_DATA(py_triplet_weights);
    bz_grid_addresses = (long(*)[3])PyArray_DATA(py_bz_grid_addresses);

    ph3py_get_detailed_imag_self_energy_at_bands_with_g(
        gamma_detail, gamma_N, gamma_U, fc3_normal_squared, frequencies,
        triplets, triplet_weights, bz_grid_addresses, g, g_zero, temperature,
        cutoff_frequency);

    free(fc3_normal_squared);
    fc3_normal_squared = NULL;

    Py_RETURN_NONE;
}

/* Python wrapper: real part of the self energy at band frequencies
   ("real_self_energy_at_bands"); forwards to
   ph3py_get_real_self_energy_at_bands(), writing py_shift. */
static PyObject *py_get_real_self_energy_at_bands(PyObject *self,
                                                  PyObject *args) {
    PyArrayObject *py_shift;
    PyArrayObject *py_fc3_normal_squared;
    PyArrayObject *py_frequencies;
    PyArrayObject *py_triplets;
    PyArrayObject *py_triplet_weights;
    PyArrayObject *py_band_indices;
    double epsilon, unit_conversion_factor, cutoff_frequency, temperature;

    Darray *fc3_normal_squared;
    double *shift;
    double *frequencies;
    long *band_indices;
    long(*triplets)[3];
    long *triplet_weights;

    if (!PyArg_ParseTuple(args, "OOOOOOdddd", &py_shift,
                          &py_fc3_normal_squared, &py_triplets,
                          &py_triplet_weights, &py_frequencies,
                          &py_band_indices, &temperature, &epsilon,
                          &unit_conversion_factor, &cutoff_frequency)) {
        return NULL;
    }

    fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
    shift = (double *)PyArray_DATA(py_shift);
    frequencies = (double *)PyArray_DATA(py_frequencies);
    band_indices = (long
        *)PyArray_DATA(py_band_indices);
    triplets = (long(*)[3])PyArray_DATA(py_triplets);
    triplet_weights = (long *)PyArray_DATA(py_triplet_weights);

    ph3py_get_real_self_energy_at_bands(
        shift, fc3_normal_squared, band_indices, frequencies, triplets,
        triplet_weights, epsilon, temperature, unit_conversion_factor,
        cutoff_frequency);

    free(fc3_normal_squared);
    fc3_normal_squared = NULL;

    Py_RETURN_NONE;
}

/* Python wrapper: real self energy evaluated at a single frequency
   point ("real_self_energy_at_frequency_point"); forwards to
   ph3py_get_real_self_energy_at_frequency_point(). */
static PyObject *py_get_real_self_energy_at_frequency_point(PyObject *self,
                                                            PyObject *args) {
    PyArrayObject *py_shift;
    PyArrayObject *py_fc3_normal_squared;
    PyArrayObject *py_frequencies;
    PyArrayObject *py_triplets;
    PyArrayObject *py_triplet_weights;
    PyArrayObject *py_band_indices;
    double frequency_point, epsilon, unit_conversion_factor,
        cutoff_frequency;
    double temperature;

    Darray *fc3_normal_squared;
    double *shift;
    double *frequencies;
    long *band_indices;
    long(*triplets)[3];
    long *triplet_weights;

    if (!PyArg_ParseTuple(args, "OdOOOOOdddd", &py_shift, &frequency_point,
                          &py_fc3_normal_squared, &py_triplets,
                          &py_triplet_weights, &py_frequencies,
                          &py_band_indices, &temperature, &epsilon,
                          &unit_conversion_factor, &cutoff_frequency)) {
        return NULL;
    }

    fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
    shift = (double *)PyArray_DATA(py_shift);
    frequencies = (double *)PyArray_DATA(py_frequencies);
    band_indices = (long *)PyArray_DATA(py_band_indices);
    triplets = (long(*)[3])PyArray_DATA(py_triplets);
    triplet_weights = (long *)PyArray_DATA(py_triplet_weights);

    ph3py_get_real_self_energy_at_frequency_point(
        shift, frequency_point, fc3_normal_squared, band_indices,
        frequencies, triplets, triplet_weights, epsilon, temperature,
        unit_conversion_factor, cutoff_frequency);

    free(fc3_normal_squared);
    fc3_normal_squared = NULL;

    Py_RETURN_NONE;
}

/* Python wrapper: collision matrix for ir-grid points
   ("collision_matrix"); forwards to ph3py_get_collision_matrix(),
   writing py_collision_matrix in place. */
static PyObject *py_get_collision_matrix(PyObject *self, PyObject *args) {
    PyArrayObject *py_collision_matrix;
    PyArrayObject *py_fc3_normal_squared;
    PyArrayObject *py_frequencies;
    PyArrayObject *py_triplets;
    PyArrayObject *py_triplets_map;
    PyArrayObject *py_map_q;
    PyArrayObject *py_g;
    PyArrayObject *py_rotated_grid_points;
    PyArrayObject *py_rotations_cartesian;
    double temperature, unit_conversion_factor, cutoff_frequency;

    Darray *fc3_normal_squared;
    double *collision_matrix;
    double *g;
    double *frequencies;
    long(*triplets)[3];
    long *triplets_map;
    long *map_q;
    long *rotated_grid_points;
    long num_gp, num_ir_gp, num_rot;
    double *rotations_cartesian;

    if (!PyArg_ParseTuple(
            args, "OOOOOOOOOddd", &py_collision_matrix,
            &py_fc3_normal_squared, &py_frequencies, &py_g, &py_triplets,
            &py_triplets_map, &py_map_q, &py_rotated_grid_points,
            &py_rotations_cartesian, &temperature, &unit_conversion_factor,
            &cutoff_frequency)) {
        return NULL;
    }

    fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
    collision_matrix = (double *)PyArray_DATA(py_collision_matrix);
    g = (double *)PyArray_DATA(py_g);
    frequencies = (double *)PyArray_DATA(py_frequencies);
    triplets = (long(*)[3])PyArray_DATA(py_triplets);
    triplets_map = (long *)PyArray_DATA(py_triplets_map);
    num_gp = (long)PyArray_DIMS(py_triplets_map)[0];
    map_q = (long *)PyArray_DATA(py_map_q);
    rotated_grid_points = (long *)PyArray_DATA(py_rotated_grid_points);
    num_ir_gp = (long)PyArray_DIMS(py_rotated_grid_points)[0];
    num_rot = (long)PyArray_DIMS(py_rotated_grid_points)[1];
    rotations_cartesian = (double *)PyArray_DATA(py_rotations_cartesian);

    /* Shape sanity checks (debug builds only). */
    assert(num_rot == PyArray_DIMS(py_rotations_cartesian)[0]);
    assert(num_gp == PyArray_DIMS(py_frequencies)[0]);

    ph3py_get_collision_matrix(collision_matrix, fc3_normal_squared,
                               frequencies, triplets, triplets_map, map_q,
                               rotated_grid_points, rotations_cartesian, g,
                               num_ir_gp, num_gp, num_rot, temperature,
                               unit_conversion_factor, cutoff_frequency);

    free(fc3_normal_squared);
    fc3_normal_squared = NULL;

    Py_RETURN_NONE;
}

/* Python wrapper: collision matrix over reducible (full-mesh) grid
   points ("reducible_collision_matrix"); forwards to
   ph3py_get_reducible_collision_matrix(). */
static PyObject *py_get_reducible_collision_matrix(PyObject *self,
                                                   PyObject *args) {
    PyArrayObject *py_collision_matrix;
    PyArrayObject *py_fc3_normal_squared;
    PyArrayObject *py_frequencies;
    PyArrayObject *py_triplets;
    PyArrayObject *py_triplets_map;
    PyArrayObject *py_map_q;
    PyArrayObject *py_g;
    double temperature, unit_conversion_factor, cutoff_frequency;

    Darray *fc3_normal_squared;
    double *collision_matrix;
    double *g;
    double *frequencies;
    long(*triplets)[3];
    long *triplets_map;
    long num_gp;
    long *map_q;

    if (!PyArg_ParseTuple(
            args, "OOOOOOOddd", &py_collision_matrix, &py_fc3_normal_squared,
            &py_frequencies, &py_g, &py_triplets, &py_triplets_map,
            &py_map_q, &temperature, &unit_conversion_factor,
            &cutoff_frequency)) {
        return NULL;
    }

    fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
    collision_matrix = (double *)PyArray_DATA(py_collision_matrix);
    g = (double *)PyArray_DATA(py_g);
    frequencies = (double *)PyArray_DATA(py_frequencies);
    triplets = (long(*)[3])PyArray_DATA(py_triplets);
    triplets_map = (long *)PyArray_DATA(py_triplets_map);
    num_gp = (long)PyArray_DIMS(py_triplets_map)[0];
    map_q = (long *)PyArray_DATA(py_map_q);

    ph3py_get_reducible_collision_matrix(
        collision_matrix, fc3_normal_squared, frequencies, triplets,
        triplets_map, map_q, g, num_gp, temperature, unit_conversion_factor,
        cutoff_frequency);

    free(fc3_normal_squared);
    fc3_normal_squared = NULL;

    Py_RETURN_NONE;
}

/* Python wrapper: in-place symmetrization of the collision matrix
   ("symmetrize_collision_matrix"); forwards to
   ph3py_symmetrize_collision_matrix(). */
static PyObject *py_symmetrize_collision_matrix(PyObject *self,
                                                PyObject *args) {
    PyArrayObject *py_collision_matrix;

    double *collision_matrix;
    long num_band, num_grid_points, num_temp, num_sigma;
    long num_column;

    if (!PyArg_ParseTuple(args, "O", &py_collision_matrix)) {
        return NULL;
    }

    collision_matrix = (double *)PyArray_DATA(py_collision_matrix);
    num_sigma = (long)PyArray_DIMS(py_collision_matrix)[0];
    num_temp = (long)PyArray_DIMS(py_collision_matrix)[1];
    num_grid_points = (long)PyArray_DIMS(py_collision_matrix)[2];
    num_band = (long)PyArray_DIMS(py_collision_matrix)[3];
    /* An 8-dimensional array carries an extra Cartesian axis of
       length 3 in the matrix columns. */
    if (PyArray_NDIM(py_collision_matrix) == 8) {
        num_column = num_grid_points * num_band * 3;
    } else {
        num_column = num_grid_points * num_band;
    }

    ph3py_symmetrize_collision_matrix(collision_matrix, num_column, num_temp,
                                      num_sigma);
    Py_RETURN_NONE;
}

/* Python wrapper: expand an ir-grid collision matrix to all rotated
   grid points ("expand_collision_matrix"); forwards to
   ph3py_expand_collision_matrix(). */
static PyObject *py_expand_collision_matrix(PyObject *self, PyObject *args) {
    PyArrayObject *py_collision_matrix;
    PyArrayObject *py_ir_grid_points;
    PyArrayObject *py_rot_grid_points;

    double *collision_matrix;
    long *rot_grid_points;
    long *ir_grid_points;
    long num_band, num_grid_points, num_temp, num_sigma, num_rot, num_ir_gp;

    if (!PyArg_ParseTuple(args, "OOO", &py_collision_matrix,
                          &py_ir_grid_points, &py_rot_grid_points)) {
        return NULL;
    }

    collision_matrix = (double *)PyArray_DATA(py_collision_matrix);
    rot_grid_points = (long *)PyArray_DATA(py_rot_grid_points);
    ir_grid_points = (long *)PyArray_DATA(py_ir_grid_points);
    num_sigma = (long)PyArray_DIMS(py_collision_matrix)[0];
    num_temp = (long)PyArray_DIMS(py_collision_matrix)[1];
    num_grid_points = (long)PyArray_DIMS(py_collision_matrix)[2];
    num_band = (long)PyArray_DIMS(py_collision_matrix)[3];
    num_rot = (long)PyArray_DIMS(py_rot_grid_points)[0];
    num_ir_gp = (long)PyArray_DIMS(py_ir_grid_points)[0];

    ph3py_expand_collision_matrix(collision_matrix, rot_grid_points,
                                  ir_grid_points, num_ir_gp, num_grid_points,
                                  num_rot, num_sigma, num_temp, num_band);

    Py_RETURN_NONE;
}

/* Python wrapper: isotope scattering strength with smearing
   ("isotope_strength"); forwards to
   ph3py_get_isotope_scattering_strength(), writing py_gamma. */
static PyObject *py_get_isotope_strength(PyObject *self, PyObject *args) {
    PyArrayObject *py_gamma;
    PyArrayObject *py_frequencies;
    PyArrayObject *py_eigenvectors;
    PyArrayObject *py_band_indices;
    PyArrayObject *py_mass_variances;
    long grid_point;
    long num_grid_points;
    double cutoff_frequency;
    double sigma;

    double *gamma;
    double *frequencies;
    lapack_complex_double *eigenvectors;
    long *band_indices;
    double *mass_variances;
    long num_band, num_band0;

    if (!PyArg_ParseTuple(args, "OlOOOOldd", &py_gamma, &grid_point,
                          &py_mass_variances, &py_frequencies,
                          &py_eigenvectors, &py_band_indices,
                          &num_grid_points, &sigma, &cutoff_frequency)) {
        return NULL;
    }

    gamma = (double *)PyArray_DATA(py_gamma);
    frequencies = (double *)PyArray_DATA(py_frequencies);
    eigenvectors = (lapack_complex_double *)PyArray_DATA(py_eigenvectors);
    band_indices = (long
        band_indices, num_band, num_band0, integration_weights,
        cutoff_frequency);

    Py_RETURN_NONE;
}

/* Python wrapper: copy fc3 elements of atom `source` to atom `target`
   under the given rotation ("distribute_fc3"); forwards to
   ph3py_distribute_fc3(), mutating force_constants_third in place. */
static PyObject *py_distribute_fc3(PyObject *self, PyObject *args) {
    PyArrayObject *force_constants_third;
    long target;
    long source;
    PyArrayObject *rotation_cart_inv;
    PyArrayObject *atom_mapping_py;

    double *fc3;
    double *rot_cart_inv;
    long *atom_mapping;
    long num_atom;

    if (!PyArg_ParseTuple(args, "OllOO", &force_constants_third, &target,
                          &source, &atom_mapping_py, &rotation_cart_inv)) {
        return NULL;
    }

    fc3 = (double *)PyArray_DATA(force_constants_third);
    rot_cart_inv = (double *)PyArray_DATA(rotation_cart_inv);
    atom_mapping = (long *)PyArray_DATA(atom_mapping_py);
    num_atom = (long)PyArray_DIMS(atom_mapping_py)[0];

    ph3py_distribute_fc3(fc3, target, source, atom_mapping, num_atom,
                         rot_cart_inv);

    Py_RETURN_NONE;
}

/* Python wrapper: rotate delta-fc2 matrices by site symmetries and
   accumulate into fc3 ("rotate_delta_fc2s"); forwards to
   ph3py_rotate_delta_fc2(). */
static PyObject *py_rotate_delta_fc2s(PyObject *self, PyObject *args) {
    PyArrayObject *py_fc3;
    PyArrayObject *py_delta_fc2s;
    PyArrayObject *py_inv_U;
    PyArrayObject *py_site_sym_cart;
    PyArrayObject *py_rot_map_syms;

    double(*fc3)[3][3][3];
    double(*delta_fc2s)[3][3];
    double *inv_U;
    double(*site_sym_cart)[3][3];
    long *rot_map_syms;
    long num_atom, num_disp, num_site_sym;

    if (!PyArg_ParseTuple(args, "OOOOO", &py_fc3, &py_delta_fc2s, &py_inv_U,
                          &py_site_sym_cart, &py_rot_map_syms)) {
        return NULL;
    }

    /* (num_atom, num_atom, 3, 3, 3) */
    fc3 = (double(*)[3][3][3])PyArray_DATA(py_fc3);
    /* (n_u1, num_atom, num_atom, 3, 3) */
    delta_fc2s = (double(*)[3][3])PyArray_DATA(py_delta_fc2s);
    /* (3, n_u1 * n_sym) */
    inv_U = (double *)PyArray_DATA(py_inv_U);
    /* (n_sym, 3, 3) */
    site_sym_cart = (double(*)[3][3])PyArray_DATA(py_site_sym_cart);
    /* (n_sym, natom) */
    rot_map_syms = (long *)PyArray_DATA(py_rot_map_syms);

    num_atom = (long)PyArray_DIMS(py_fc3)[0];
    num_disp = (long)PyArray_DIMS(py_delta_fc2s)[0];
    num_site_sym = (long)PyArray_DIMS(py_site_sym_cart)[0];

    ph3py_rotate_delta_fc2(fc3, delta_fc2s, inv_U, site_sym_cart,
                           rot_map_syms, num_atom, num_site_sym, num_disp);

    Py_RETURN_NONE;
}

/* Python wrapper: impose index-permutation symmetry on a full fc3
   ("permutation_symmetry_fc3"); forwards to
   ph3py_get_permutation_symmetry_fc3(), mutating py_fc3 in place. */
static
PyObject *py_get_permutation_symmetry_fc3(PyObject *self, PyObject *args) {
    PyArrayObject *py_fc3;

    double *fc3;
    long num_atom;

    if (!PyArg_ParseTuple(args, "O", &py_fc3)) {
        return NULL;
    }

    fc3 = (double *)PyArray_DATA(py_fc3);
    num_atom = (long)PyArray_DIMS(py_fc3)[0];

    ph3py_get_permutation_symmetry_fc3(fc3, num_atom);

    Py_RETURN_NONE;
}

/* Python wrapper: permutation symmetry for the compact fc3 layout
   ("permutation_symmetry_compact_fc3"); forwards to
   ph3py_get_permutation_symmetry_compact_fc3(). */
static PyObject *py_get_permutation_symmetry_compact_fc3(PyObject *self,
                                                         PyObject *args) {
    PyArrayObject *py_fc3;
    PyArrayObject *py_permutations;
    PyArrayObject *py_s2pp_map;
    PyArrayObject *py_p2s_map;
    PyArrayObject *py_nsym_list;

    double *fc3;
    long *s2pp;
    long *p2s;
    long *nsym_list;
    long *perms;
    long n_patom, n_satom;

    if (!PyArg_ParseTuple(args, "OOOOO", &py_fc3, &py_permutations,
                          &py_s2pp_map, &py_p2s_map, &py_nsym_list)) {
        return NULL;
    }

    fc3 = (double *)PyArray_DATA(py_fc3);
    perms = (long *)PyArray_DATA(py_permutations);
    s2pp = (long *)PyArray_DATA(py_s2pp_map);
    p2s = (long *)PyArray_DATA(py_p2s_map);
    nsym_list = (long *)PyArray_DATA(py_nsym_list);
    /* Compact layout: dims are (n_patom, n_satom, ...). */
    n_patom = (long)PyArray_DIMS(py_fc3)[0];
    n_satom = (long)PyArray_DIMS(py_fc3)[1];

    ph3py_get_permutation_symmetry_compact_fc3(fc3, p2s, s2pp, nsym_list,
                                               perms, n_satom, n_patom);

    Py_RETURN_NONE;
}

/* Python wrapper: transpose a compact fc3 ("transpose_compact_fc3");
   forwards to ph3py_transpose_compact_fc3() with the requested
   transposition type t_type. */
static PyObject *py_transpose_compact_fc3(PyObject *self, PyObject *args) {
    PyArrayObject *py_fc3;
    PyArrayObject *py_permutations;
    PyArrayObject *py_s2pp_map;
    PyArrayObject *py_p2s_map;
    PyArrayObject *py_nsym_list;
    long t_type;

    double *fc3;
    long *s2pp;
    long *p2s;
    long *nsym_list;
    long *perms;
    long n_patom, n_satom;

    if (!PyArg_ParseTuple(args, "OOOOOl", &py_fc3, &py_permutations,
                          &py_s2pp_map, &py_p2s_map, &py_nsym_list,
                          &t_type)) {
        return NULL;
    }

    fc3 = (double *)PyArray_DATA(py_fc3);
    perms = (long *)PyArray_DATA(py_permutations);
    s2pp = (long *)PyArray_DATA(py_s2pp_map);
    p2s = (long *)PyArray_DATA(py_p2s_map);
    nsym_list = (long *)PyArray_DATA(py_nsym_list);
    n_patom = (long)PyArray_DIMS(py_fc3)[0];
    n_satom = (long)PyArray_DIMS(py_fc3)[1];

    ph3py_transpose_compact_fc3(fc3, p2s, s2pp, nsym_list, perms,
n_satom, n_patom, t_type); Py_RETURN_NONE; } static PyObject *py_get_neighboring_grid_points(PyObject *self, PyObject *args) { PyArrayObject *py_relative_grid_points; PyArrayObject *py_grid_points; PyArrayObject *py_relative_grid_address; PyArrayObject *py_D_diag; PyArrayObject *py_bz_grid_address; PyArrayObject *py_bz_map; long bz_grid_type; long *relative_grid_points; long *grid_points; long num_grid_points, num_relative_grid_address; long(*relative_grid_address)[3]; long *D_diag; long(*bz_grid_address)[3]; long *bz_map; if (!PyArg_ParseTuple(args, "OOOOOOl", &py_relative_grid_points, &py_grid_points, &py_relative_grid_address, &py_D_diag, &py_bz_grid_address, &py_bz_map, &bz_grid_type)) { return NULL; } relative_grid_points = (long *)PyArray_DATA(py_relative_grid_points); grid_points = (long *)PyArray_DATA(py_grid_points); num_grid_points = (long)PyArray_DIMS(py_grid_points)[0]; relative_grid_address = (long(*)[3])PyArray_DATA(py_relative_grid_address); num_relative_grid_address = (long)PyArray_DIMS(py_relative_grid_address)[0]; D_diag = (long *)PyArray_DATA(py_D_diag); bz_grid_address = (long(*)[3])PyArray_DATA(py_bz_grid_address); bz_map = (long *)PyArray_DATA(py_bz_map); ph3py_get_neighboring_gird_points( relative_grid_points, grid_points, relative_grid_address, D_diag, bz_grid_address, bz_map, bz_grid_type, num_grid_points, num_relative_grid_address); Py_RETURN_NONE; } static PyObject *py_get_thm_integration_weights_at_grid_points(PyObject *self, PyObject *args) { PyArrayObject *py_iw; PyArrayObject *py_frequency_points; PyArrayObject *py_relative_grid_address; PyArrayObject *py_D_diag; PyArrayObject *py_grid_points; PyArrayObject *py_frequencies; PyArrayObject *py_bz_grid_address; PyArrayObject *py_gp2irgp_map; PyArrayObject *py_bz_map; long bz_grid_type; char *function; double *iw; double *frequency_points; long num_frequency_points, num_band, num_gp; long(*relative_grid_address)[4][3]; long *D_diag; long *grid_points; long(*bz_grid_address)[3]; long 
        *bz_map;
    long *gp2irgp_map;
    double *frequencies;

    if (!PyArg_ParseTuple(args, "OOOOOOOOOls", &py_iw, &py_frequency_points,
                          &py_relative_grid_address, &py_D_diag,
                          &py_grid_points, &py_frequencies,
                          &py_bz_grid_address, &py_bz_map, &py_gp2irgp_map,
                          &bz_grid_type, &function)) {
        return NULL;
    }

    iw = (double *)PyArray_DATA(py_iw);
    frequency_points = (double *)PyArray_DATA(py_frequency_points);
    num_frequency_points = (long)PyArray_DIMS(py_frequency_points)[0];
    relative_grid_address =
        (long(*)[4][3])PyArray_DATA(py_relative_grid_address);
    D_diag = (long *)PyArray_DATA(py_D_diag);
    grid_points = (long *)PyArray_DATA(py_grid_points);
    num_gp = (long)PyArray_DIMS(py_grid_points)[0];
    bz_grid_address = (long(*)[3])PyArray_DATA(py_bz_grid_address);
    bz_map = (long *)PyArray_DATA(py_bz_map);
    gp2irgp_map = (long *)PyArray_DATA(py_gp2irgp_map);
    frequencies = (double *)PyArray_DATA(py_frequencies);
    num_band = (long)PyArray_DIMS(py_frequencies)[1];

    /* function[0] ('I' or 'J' style selector) chooses the weight kind. */
    ph3py_get_thm_integration_weights_at_grid_points(
        iw, frequency_points, num_frequency_points, num_band, num_gp,
        relative_grid_address, D_diag, grid_points, bz_grid_address, bz_map,
        bz_grid_type, frequencies, gp2irgp_map, function[0]);

    Py_RETURN_NONE;
}

/* Irreducible triplet mapping at fixed q on the reciprocal mesh.
 * Returns the number of irreducible triplets. */
static PyObject *py_tpl_get_triplets_reciprocal_mesh_at_q(PyObject *self,
                                                          PyObject *args) {
    PyArrayObject *py_map_triplets;
    PyArrayObject *py_map_q;
    PyArrayObject *py_D_diag;
    PyArrayObject *py_rotations;
    long fixed_grid_number;
    long is_time_reversal;
    long swappable;

    long *map_triplets;
    long *map_q;
    long *D_diag;
    long(*rot)[3][3];
    long num_rot;
    long num_ir;

    if (!PyArg_ParseTuple(args, "OOlOlOl", &py_map_triplets, &py_map_q,
                          &fixed_grid_number, &py_D_diag, &is_time_reversal,
                          &py_rotations, &swappable)) {
        return NULL;
    }

    map_triplets = (long *)PyArray_DATA(py_map_triplets);
    map_q = (long *)PyArray_DATA(py_map_q);
    D_diag = (long *)PyArray_DATA(py_D_diag);
    rot = (long(*)[3][3])PyArray_DATA(py_rotations);
    num_rot = (long)PyArray_DIMS(py_rotations)[0];

    num_ir = ph3py_get_triplets_reciprocal_mesh_at_q(
        map_triplets, map_q, fixed_grid_number, D_diag, is_time_reversal,
        num_rot, rot, swappable);

    return PyLong_FromLong(num_ir);
}

/* Expand irreducible triplets into BZ grid-point triplets at grid_point.
 * Returns the number of triplets written. */
static PyObject *py_tpl_get_BZ_triplets_at_q(PyObject *self, PyObject *args) {
    PyArrayObject *py_triplets;
    PyArrayObject *py_bz_grid_address;
    PyArrayObject *py_bz_map;
    PyArrayObject *py_map_triplets;
    PyArrayObject *py_D_diag;
    PyArrayObject *py_Q;
    long grid_point;
    long bz_grid_type;

    long(*triplets)[3];
    long(*bz_grid_address)[3];
    long *bz_map;
    long *map_triplets;
    long num_map_triplets;
    long *D_diag;
    long(*Q)[3];
    long num_ir;

    if (!PyArg_ParseTuple(args, "OlOOOOOl", &py_triplets, &grid_point,
                          &py_bz_grid_address, &py_bz_map, &py_map_triplets,
                          &py_D_diag, &py_Q, &bz_grid_type)) {
        return NULL;
    }

    triplets = (long(*)[3])PyArray_DATA(py_triplets);
    bz_grid_address = (long(*)[3])PyArray_DATA(py_bz_grid_address);
    bz_map = (long *)PyArray_DATA(py_bz_map);
    map_triplets = (long *)PyArray_DATA(py_map_triplets);
    num_map_triplets = (long)PyArray_DIMS(py_map_triplets)[0];
    D_diag = (long *)PyArray_DATA(py_D_diag);
    Q = (long(*)[3])PyArray_DATA(py_Q);

    num_ir = ph3py_get_BZ_triplets_at_q(triplets, grid_point,
                                        bz_grid_address, bz_map,
                                        map_triplets, num_map_triplets,
                                        D_diag, Q, bz_grid_type);

    return PyLong_FromLong(num_ir);
}

/* Tetrahedron-method integration weights for phonon triplets.
 * (Definition continues in the next chunk.) */
static PyObject *py_get_triplets_integration_weights(PyObject *self,
                                                     PyObject *args) {
    PyArrayObject *py_iw;
    PyArrayObject *py_iw_zero;
    PyArrayObject *py_frequency_points;
    PyArrayObject *py_relative_grid_address;
    PyArrayObject *py_D_diag;
    PyArrayObject *py_triplets;
    PyArrayObject *py_frequencies1;
    PyArrayObject *py_frequencies2;
    PyArrayObject *py_bz_grid_addresses;
    PyArrayObject *py_bz_map;
    long bz_grid_type;
    long tp_type;

    double *iw;
    char *iw_zero;
    double *frequency_points;
    long(*relative_grid_address)[4][3];
    long *D_diag;
    long(*triplets)[3];
    long(*bz_grid_addresses)[3];
    long *bz_map;
    double *frequencies1, *frequencies2;
    long num_band0, num_band1, num_band2, num_triplets;

    if (!PyArg_ParseTuple(args, "OOOOOOOOOOll", &py_iw, &py_iw_zero,
                          &py_frequency_points, &py_relative_grid_address,
                          &py_D_diag, &py_triplets, &py_frequencies1,
                          &py_frequencies2, &py_bz_grid_addresses, &py_bz_map,
                          &bz_grid_type, &tp_type)) {
        return NULL;
    }

    iw = (double *)PyArray_DATA(py_iw);
    iw_zero = (char *)PyArray_DATA(py_iw_zero);
    frequency_points = (double *)PyArray_DATA(py_frequency_points);
    num_band0 = (long)PyArray_DIMS(py_frequency_points)[0];
    relative_grid_address =
        (long(*)[4][3])PyArray_DATA(py_relative_grid_address);
    D_diag = (long *)PyArray_DATA(py_D_diag);
    triplets = (long(*)[3])PyArray_DATA(py_triplets);
    num_triplets = (long)PyArray_DIMS(py_triplets)[0];
    bz_grid_addresses = (long(*)[3])PyArray_DATA(py_bz_grid_addresses);
    bz_map = (long *)PyArray_DATA(py_bz_map);
    frequencies1 = (double *)PyArray_DATA(py_frequencies1);
    frequencies2 = (double *)PyArray_DATA(py_frequencies2);
    num_band1 = (long)PyArray_DIMS(py_frequencies1)[1];
    num_band2 = (long)PyArray_DIMS(py_frequencies2)[1];

    /* trailing 1, 0: openmp_per_triplets / openmp_per_bands style flags
     * as defined by the backend. */
    ph3py_get_integration_weight(
        iw, iw_zero, frequency_points, num_band0, relative_grid_address,
        D_diag, triplets, num_triplets, bz_grid_addresses, bz_map,
        bz_grid_type, frequencies1, num_band1, frequencies2, num_band2,
        tp_type, 1, 0);

    Py_RETURN_NONE;
}

/* Smearing (Gaussian sigma) variant of the triplet integration weights. */
static PyObject *py_get_triplets_integration_weights_with_sigma(
    PyObject *self, PyObject *args) {
    PyArrayObject *py_iw;
    PyArrayObject *py_iw_zero;
    PyArrayObject *py_frequency_points;
    PyArrayObject *py_triplets;
    PyArrayObject *py_frequencies;
    double sigma, sigma_cutoff;

    double *iw;
    char *iw_zero;
    double *frequency_points;
    long(*triplets)[3];
    double *frequencies;
    long num_band0, num_band, num_iw, num_triplets;

    if (!PyArg_ParseTuple(args, "OOOOOdd", &py_iw, &py_iw_zero,
                          &py_frequency_points, &py_triplets, &py_frequencies,
                          &sigma, &sigma_cutoff)) {
        return NULL;
    }

    iw = (double *)PyArray_DATA(py_iw);
    iw_zero = (char *)PyArray_DATA(py_iw_zero);
    frequency_points = (double *)PyArray_DATA(py_frequency_points);
    num_band0 = (long)PyArray_DIMS(py_frequency_points)[0];
    triplets = (long(*)[3])PyArray_DATA(py_triplets);
    num_triplets = (long)PyArray_DIMS(py_triplets)[0];
    frequencies = (double *)PyArray_DATA(py_frequencies);
    num_band = (long)PyArray_DIMS(py_frequencies)[1];
    num_iw = (long)PyArray_DIMS(py_iw)[0];

    ph3py_get_integration_weight_with_sigma(
        iw, iw_zero, sigma, sigma_cutoff, frequency_points, num_band0,
        triplets, num_triplets, frequencies, num_band, num_iw);

    Py_RETURN_NONE;
}

/* Linear grid index of a grid address on a mesh with diagonal D_diag. */
static PyObject *py_get_grid_index_from_address(PyObject *self,
                                               PyObject *args) {
    PyArrayObject *py_address;
    PyArrayObject *py_D_diag;

    long *address;
    long *D_diag;
    long gp;

    if (!PyArg_ParseTuple(args, "OO", &py_address, &py_D_diag)) {
        return NULL;
    }

    address = (long *)PyArray_DATA(py_address);
    D_diag = (long *)PyArray_DATA(py_D_diag);

    gp = ph3py_get_grid_index_from_address(address, D_diag);

    return PyLong_FromLong(gp);
}

/* Fill gr_grid_addresses with all generalized-regular grid addresses. */
static PyObject *py_get_gr_grid_addresses(PyObject *self, PyObject *args) {
    PyArrayObject *py_gr_grid_addresses;
    PyArrayObject *py_D_diag;

    long(*gr_grid_addresses)[3];
    long *D_diag;

    if (!PyArg_ParseTuple(args, "OO", &py_gr_grid_addresses, &py_D_diag)) {
        return NULL;
    }

    gr_grid_addresses = (long(*)[3])PyArray_DATA(py_gr_grid_addresses);
    D_diag = (long *)PyArray_DATA(py_D_diag);

    ph3py_get_gr_grid_addresses(gr_grid_addresses, D_diag);

    Py_RETURN_NONE;
}

/* Collect reciprocal-space rotations (optionally adding time reversal).
 * Returns the number of unique reciprocal rotations. */
static PyObject *py_get_reciprocal_rotations(PyObject *self, PyObject *args) {
    PyArrayObject *py_rec_rotations;
    PyArrayObject *py_rotations;
    long is_time_reversal;

    long(*rec_rotations)[3][3];
    long(*rotations)[3][3];
    long num_rot, num_rec_rot;

    if (!PyArg_ParseTuple(args, "OOl", &py_rec_rotations, &py_rotations,
                          &is_time_reversal)) {
        return NULL;
    }

    rec_rotations = (long(*)[3][3])PyArray_DATA(py_rec_rotations);
    rotations = (long(*)[3][3])PyArray_DATA(py_rotations);
    num_rot = (long)PyArray_DIMS(py_rotations)[0];

    num_rec_rot = ph3py_get_reciprocal_rotations(rec_rotations, rotations,
                                                 num_rot, is_time_reversal);

    return PyLong_FromLong(num_rec_rot);
}

/* Transform rotations into the generalized-regular-grid basis.
 * (Definition continues in the next chunk.) */
static PyObject *py_transform_rotations(PyObject *self,
                                        PyObject *args) {
    PyArrayObject *py_transformed_rotations;
    PyArrayObject *py_rotations;
    PyArrayObject *py_D_diag;
    PyArrayObject *py_Q;

    long(*transformed_rotations)[3][3];
    long(*rotations)[3][3];
    long *D_diag;
    long(*Q)[3];
    long num_rot, succeeded;

    if (!PyArg_ParseTuple(args, "OOOO", &py_transformed_rotations,
                          &py_rotations, &py_D_diag, &py_Q)) {
        return NULL;
    }

    transformed_rotations =
        (long(*)[3][3])PyArray_DATA(py_transformed_rotations);
    rotations = (long(*)[3][3])PyArray_DATA(py_rotations);
    D_diag = (long *)PyArray_DATA(py_D_diag);
    Q = (long(*)[3])PyArray_DATA(py_Q);
    num_rot = (long)PyArray_DIMS(py_transformed_rotations)[0];

    succeeded = ph3py_transform_rotations(transformed_rotations, rotations,
                                          num_rot, D_diag, Q);
    /* Returns Python True/False for success/failure. */
    if (succeeded) {
        Py_RETURN_TRUE;
    } else {
        Py_RETURN_FALSE;
    }
}

/* Smith normal form of a 3x3 integer matrix A: A = P^-1 diag(D_diag) Q^-1.
 * Outputs are written into the provided arrays; returns True on success. */
static PyObject *py_get_snf3x3(PyObject *self, PyObject *args) {
    PyArrayObject *py_D_diag;
    PyArrayObject *py_P;
    PyArrayObject *py_Q;
    PyArrayObject *py_A;

    long *D_diag;
    long(*P)[3];
    long(*Q)[3];
    long(*A)[3];
    long succeeded;

    if (!PyArg_ParseTuple(args, "OOOO", &py_D_diag, &py_P, &py_Q, &py_A)) {
        return NULL;
    }

    D_diag = (long *)PyArray_DATA(py_D_diag);
    P = (long(*)[3])PyArray_DATA(py_P);
    Q = (long(*)[3])PyArray_DATA(py_Q);
    A = (long(*)[3])PyArray_DATA(py_A);

    succeeded = ph3py_get_snf3x3(D_diag, P, Q, A);
    if (succeeded) {
        Py_RETURN_TRUE;
    } else {
        Py_RETURN_FALSE;
    }
}

/* Map every grid point to its irreducible representative under the given
 * rotations; returns the number of irreducible points. */
static PyObject *py_get_ir_grid_map(PyObject *self, PyObject *args) {
    PyArrayObject *py_grid_mapping_table;
    PyArrayObject *py_D_diag;
    PyArrayObject *py_is_shift;
    PyArrayObject *py_rotations;

    long *D_diag;
    long *is_shift;
    long(*rot)[3][3];
    long num_rot;

    long *grid_mapping_table;
    long num_ir;

    if (!PyArg_ParseTuple(args, "OOOO", &py_grid_mapping_table, &py_D_diag,
                          &py_is_shift, &py_rotations)) {
        return NULL;
    }

    D_diag = (long *)PyArray_DATA(py_D_diag);
    is_shift = (long *)PyArray_DATA(py_is_shift);
    rot = (long(*)[3][3])PyArray_DATA(py_rotations);
    num_rot = (long)PyArray_DIMS(py_rotations)[0];
    grid_mapping_table = (long *)PyArray_DATA(py_grid_mapping_table);

    num_ir = ph3py_get_ir_grid_map(grid_mapping_table, D_diag, is_shift, rot,
                                   num_rot);

    return PyLong_FromLong(num_ir);
}

/* Build Brillouin-zone grid addresses and the GR-grid <-> BZ-grid maps.
 * Returns the total number of BZ grid points (including surface copies). */
static PyObject *py_get_bz_grid_addresses(PyObject *self, PyObject *args) {
    PyArrayObject *py_bz_grid_addresses;
    PyArrayObject *py_bz_map;
    PyArrayObject *py_bzg2grg;
    PyArrayObject *py_D_diag;
    PyArrayObject *py_Q;
    PyArrayObject *py_PS;
    PyArrayObject *py_reciprocal_lattice;
    long type;

    long(*bz_grid_addresses)[3];
    long *bz_map;
    long *bzg2grg;
    long *D_diag;
    long(*Q)[3];
    long *PS;
    double(*reciprocal_lattice)[3];
    long num_total_gp;

    if (!PyArg_ParseTuple(args, "OOOOOOOl", &py_bz_grid_addresses, &py_bz_map,
                          &py_bzg2grg, &py_D_diag, &py_Q, &py_PS,
                          &py_reciprocal_lattice, &type)) {
        return NULL;
    }

    bz_grid_addresses = (long(*)[3])PyArray_DATA(py_bz_grid_addresses);
    bz_map = (long *)PyArray_DATA(py_bz_map);
    bzg2grg = (long *)PyArray_DATA(py_bzg2grg);
    D_diag = (long *)PyArray_DATA(py_D_diag);
    Q = (long(*)[3])PyArray_DATA(py_Q);
    PS = (long *)PyArray_DATA(py_PS);
    reciprocal_lattice = (double(*)[3])PyArray_DATA(py_reciprocal_lattice);

    num_total_gp = ph3py_get_bz_grid_addresses(bz_grid_addresses, bz_map,
                                               bzg2grg, D_diag, Q, PS,
                                               reciprocal_lattice, type);

    return PyLong_FromLong(num_total_gp);
}

/* Rotate one BZ grid index by a reciprocal rotation and return the rotated
 * BZ grid index.  (Definition continues in the next chunk.) */
static PyObject *py_rotate_bz_grid_addresses(PyObject *self, PyObject *args) {
    PyArrayObject *py_bz_grid_addresses;
    PyArrayObject *py_rotation;
    PyArrayObject *py_bz_map;
    PyArrayObject *py_D_diag;
    PyArrayObject *py_PS;
    long bz_grid_index;
    long type;

    long(*bz_grid_addresses)[3];
    long(*rotation)[3];
    long *bz_map;
    long *D_diag;
    long *PS;
    long ret_bz_gp;

    if (!PyArg_ParseTuple(args, "lOOOOOl", &bz_grid_index, &py_rotation,
                          &py_bz_grid_addresses, &py_bz_map, &py_D_diag,
                          &py_PS, &type)) {
        return NULL;
    }

    bz_grid_addresses = (long(*)[3])PyArray_DATA(py_bz_grid_addresses);
    rotation = (long(*)[3])PyArray_DATA(py_rotation);
    bz_map = (long *)PyArray_DATA(py_bz_map);
    D_diag = (long *)PyArray_DATA(py_D_diag);
    PS = (long *)PyArray_DATA(py_PS);

    ret_bz_gp =
ph3py_rotate_bz_grid_index( bz_grid_index, rotation, bz_grid_addresses, bz_map, D_diag, PS, type); return PyLong_FromLong(ret_bz_gp); } static PyObject *py_diagonalize_collision_matrix(PyObject *self, PyObject *args) { PyArrayObject *py_collision_matrix; PyArrayObject *py_eigenvalues; double cutoff; long i_sigma, i_temp, is_pinv, solver; double *collision_matrix; double *eigvals; long num_temp, num_grid_point, num_band; long num_column, adrs_shift; long info; if (!PyArg_ParseTuple(args, "OOlldll", &py_collision_matrix, &py_eigenvalues, &i_sigma, &i_temp, &cutoff, &solver, &is_pinv)) { return NULL; } collision_matrix = (double *)PyArray_DATA(py_collision_matrix); eigvals = (double *)PyArray_DATA(py_eigenvalues); if (PyArray_NDIM(py_collision_matrix) == 2) { num_temp = 1; num_column = (long)PyArray_DIM(py_collision_matrix, 1); } else { num_temp = (long)PyArray_DIM(py_collision_matrix, 1); num_grid_point = (long)PyArray_DIM(py_collision_matrix, 2); num_band = (long)PyArray_DIM(py_collision_matrix, 3); if (PyArray_NDIM(py_collision_matrix) == 8) { num_column = num_grid_point * num_band * 3; } else { num_column = num_grid_point * num_band; } } adrs_shift = (i_sigma * num_column * num_column * num_temp + i_temp * num_column * num_column); /* show_colmat_info(py_collision_matrix, i_sigma, i_temp, adrs_shift); */ info = phonopy_dsyev(collision_matrix + adrs_shift, eigvals, num_column, solver); if (is_pinv) { pinv_from_eigensolution(collision_matrix + adrs_shift, eigvals, num_column, cutoff, 0); } return PyLong_FromLong(info); } static PyObject *py_pinv_from_eigensolution(PyObject *self, PyObject *args) { PyArrayObject *py_collision_matrix; PyArrayObject *py_eigenvalues; double cutoff; long i_sigma, i_temp, pinv_method; double *collision_matrix; double *eigvals; long num_temp, num_grid_point, num_band; long num_column, adrs_shift; if (!PyArg_ParseTuple(args, "OOlldl", &py_collision_matrix, &py_eigenvalues, &i_sigma, &i_temp, &cutoff, &pinv_method)) { return NULL; } 
collision_matrix = (double *)PyArray_DATA(py_collision_matrix); eigvals = (double *)PyArray_DATA(py_eigenvalues); num_temp = (long)PyArray_DIMS(py_collision_matrix)[1]; num_grid_point = (long)PyArray_DIMS(py_collision_matrix)[2]; num_band = (long)PyArray_DIMS(py_collision_matrix)[3]; if (PyArray_NDIM(py_collision_matrix) == 8) { num_column = num_grid_point * num_band * 3; } else { num_column = num_grid_point * num_band; } adrs_shift = (i_sigma * num_column * num_column * num_temp + i_temp * num_column * num_column); /* show_colmat_info(py_collision_matrix, i_sigma, i_temp, adrs_shift); */ pinv_from_eigensolution(collision_matrix + adrs_shift, eigvals, num_column, cutoff, pinv_method); Py_RETURN_NONE; } static PyObject *py_get_default_colmat_solver(PyObject *self, PyObject *args) { if (!PyArg_ParseTuple(args, "")) { return NULL; } #ifdef MKL_LAPACKE return PyLong_FromLong((long)1); #else return PyLong_FromLong((long)4); #endif } static void pinv_from_eigensolution(double *data, const double *eigvals, const long size, const double cutoff, const long pinv_method) { long i, ib, j, k, max_l, i_s, j_s; double *tmp_data; double e, sum; long *l; l = NULL; tmp_data = NULL; tmp_data = (double *)malloc(sizeof(double) * size * size); #ifdef PHPYOPENMP #pragma omp parallel for #endif for (i = 0; i < size * size; i++) { tmp_data[i] = data[i]; } l = (long *)malloc(sizeof(long) * size); max_l = 0; for (i = 0; i < size; i++) { if (pinv_method == 0) { e = fabs(eigvals[i]); } else { e = eigvals[i]; } if (e > cutoff) { l[max_l] = i; max_l++; } } #ifdef PHPYOPENMP #pragma omp parallel for private(ib, j, k, i_s, j_s, sum) #endif for (i = 0; i < size / 2; i++) { /* from front */ i_s = i * size; for (j = i; j < size; j++) { j_s = j * size; sum = 0; for (k = 0; k < max_l; k++) { sum += tmp_data[i_s + l[k]] * tmp_data[j_s + l[k]] / eigvals[l[k]]; } data[i_s + j] = sum; data[j_s + i] = sum; } /* from back */ ib = size - i - 1; i_s = ib * size; for (j = ib; j < size; j++) { j_s = j * size; 
sum = 0; for (k = 0; k < max_l; k++) { sum += tmp_data[i_s + l[k]] * tmp_data[j_s + l[k]] / eigvals[l[k]]; } data[i_s + j] = sum; data[j_s + ib] = sum; } } /* when size is odd */ if ((size % 2) == 1) { i = (size - 1) / 2; i_s = i * size; for (j = i; j < size; j++) { j_s = j * size; sum = 0; for (k = 0; k < max_l; k++) { sum += tmp_data[i_s + l[k]] * tmp_data[j_s + l[k]] / eigvals[l[k]]; } data[i_s + j] = sum; data[j_s + i] = sum; } } free(l); l = NULL; free(tmp_data); tmp_data = NULL; } static void show_colmat_info(const PyArrayObject *py_collision_matrix, const long i_sigma, const long i_temp, const long adrs_shift) { long i; printf(" Array_shape:("); for (i = 0; i < PyArray_NDIM(py_collision_matrix); i++) { printf("%d", (int)PyArray_DIM(py_collision_matrix, i)); if (i < PyArray_NDIM(py_collision_matrix) - 1) { printf(","); } else { printf("), "); } } printf("Data shift:%lu [%lu, %lu]\n", adrs_shift, i_sigma, i_temp); } static Larray *convert_to_larray(const PyArrayObject *npyary) { long i; Larray *ary; ary = (Larray *)malloc(sizeof(Larray)); for (i = 0; i < PyArray_NDIM(npyary); i++) { ary->dims[i] = PyArray_DIMS(npyary)[i]; } ary->data = (long *)PyArray_DATA(npyary); return ary; } static Darray *convert_to_darray(const PyArrayObject *npyary) { int i; Darray *ary; ary = (Darray *)malloc(sizeof(Darray)); for (i = 0; i < PyArray_NDIM(npyary); i++) { ary->dims[i] = PyArray_DIMS(npyary)[i]; } ary->data = (double *)PyArray_DATA(npyary); return ary; }
/* ==== Statistics.h ==== */
#ifndef DEF_STATISTICS
#define DEF_STATISTICS

/* =========================================================================
   Copyright (c) 2008-2015, Institute for Microelectronics, TU Wien.

                 -----------------
   ViennaTS - The Vienna Topography Simulator
                 -----------------

   Contact:         viennats@iue.tuwien.ac.at

   License:         MIT (X11), see file LICENSE in the base directory
============================================================================= */

#include "Math.h"
#include <cmath>
#include <cstdlib>
#if defined(_OPENMP)
#include <omp.h>
#endif
//#include "sprng/sprng.h"
#include <chrono>
#include <random>
#include "message.h"
#include <vector>
#include <fstream>
#include <iostream>

namespace my {

///Contains Random number generation algorythms and other statistical tools.
namespace stat {

    // Tolerance used by the rejection samplers below.
    static const double epsilon=1e-10;

    // NOTE(review): non-inline namespace-scope definitions in a header;
    // including this header from more than one TU violates the ODR.
    // Seeded once per process from the wall clock.
    unsigned int ClockSEED = std::chrono::system_clock::now().time_since_epoch().count();
    std::default_random_engine generator(ClockSEED);
    std::uniform_real_distribution<double> distribution(0.0,1.0);

    using namespace math;

    // Legacy SPRNG state pointer; one copy per OpenMP thread.  Appears
    // unused by the code visible in this header.
    int* rng;
    #pragma omp threadprivate (rng)

    /// Uniform random double in [0,1).
    inline double RandomNumber() {
        return distribution(generator);
    }

    /// Sets (a,b) to a uniform random point on the unit circle, using
    /// rejection sampling in a square plus the half-angle identities
    /// (avoids trig calls).
    inline void PickRandomPointOnUnitCircle(double& a, double& b) {   //better on AMD
        double x,y,x2,y2,x2py2;
        do {
            x=RandomNumber()-0.5;
            x2=x*x;
            y=RandomNumber()-0.5;
            y2=y*y;
            x2py2=x2+y2;
        } while ((x2py2>=0.25) || (x2py2<=epsilon));
        a=(x2-y2)/x2py2;        // cos(2*phi)
        b=2*((x*y)/x2py2);      // sin(2*phi)
    }

    /// Trigonometric variant of the above.
    inline void PickRandomPointOnUnitCircle2(double& a, double& b) {
        double phi=RandomNumber()*Pi2;
        a=std::cos(phi);
        b=std::sin(phi);
    }

    /// Sets (x,y,z) to a uniform random point on the unit sphere
    /// (Marsaglia's rejection method).
    inline void PickRandomPointOnUnitSphere(double& x, double& y, double& z) {      //better
        double x2,y2,x2py2;
        do {
            x=2*RandomNumber()-1.;
            x2=x*x;
            y=2*RandomNumber()-1.;
            y2=y*y;
            x2py2=x2+y2;
        } while (x2py2>=1.);
        double tmp=2*std::sqrt(1.-x2py2);
        x*=tmp;
        y*=tmp;
        z=1.-2*x2py2;
    }

    /// Samples cos(theta) from the cos^N(theta)*sin(theta) distribution
    /// via inverse-transform sampling.
    inline double PowerCosineSineDistributionReturnCosTheta(const double N) {
        return std::pow(RandomNumber(),1./(N+1.));
    }

    /// Rejection-samples theta in [0, cone_angle] from a cosine-sine
    /// distribution restricted to the cone.
    inline double ConeCosineSineDistributionReturnTheta(const double cone_angle) {
        double u, sqrt_1m_u;
        double angle;
        do {
            u=std::sqrt(RandomNumber());
            sqrt_1m_u=std::sqrt(1.-u);
            angle=cone_angle*sqrt_1m_u;
        } while (RandomNumber()*angle*u>std::cos(Pi1_2*sqrt_1m_u)*std::sin(angle));
        return angle;
    }

    /// As above but returns cos(theta) directly.
    inline double ConeCosineSineDistributionReturnCosTheta(const double cone_angle) {
        double u, sqrt_1m_u;
        double cosine;
        double left, right;
        do {
            u=std::sqrt(RandomNumber());
            sqrt_1m_u=std::sqrt(1.-u);
            cosine=std::cos(cone_angle*sqrt_1m_u);
            left=RandomNumber()*cone_angle*sqrt_1m_u*u;
            left*=left;
            right=std::cos(Pi1_2*sqrt_1m_u);
            right*=right;
            right*=(1.-cosine*cosine);
        } while (left>right);
        return cosine;
    }

    /// Alternative cone sampler: uniform in cos(theta), thinned by a
    /// cosine envelope over the cone.
    inline double ConeCosineSineDistributionReturnTheta2(const double cone_angle) {
        double cosine;
        double _1_m_cos_cone_angle=1.-std::cos(cone_angle);
        double angle;
        double a=Pi1_2/cone_angle;
        do {
            cosine=1-RandomNumber()*_1_m_cos_cone_angle;
            angle=std::acos(cosine);
        } while (RandomNumber()>std::cos(a*angle));
        return angle;
    }

    /// Alternative cone sampler (sqrt substitution).  NOTE(review): the
    /// local variable `sqrt` shadows ::sqrt but std::sqrt is still used.
    inline double ConeCosineSineDistributionReturnTheta3(const double cone_angle) {
        double angle;
        double sqrt;
        do {
            sqrt=std::sqrt(RandomNumber());
            angle=sqrt*cone_angle;
        } while (RandomNumber()*angle>std::cos(Pi1_2*sqrt)*std::sin(angle));
        return angle;
    }

    /// Rotates a deviation of polar angle acos(costheta) and azimuth
    /// (cosphi,sinphi) (scaled by 1/sqrt(r2)) around AverageDirection,
    /// writing the result into RandomDirection.  The component swap keeps
    /// the construction numerically stable when |dir[0]| > |dir[1]|.
    template<class VecType, class VecType2> inline void Rotate(const VecType& AverageDirection, VecType2& RandomDirection, const double sinphi, const double cosphi, double costheta, const double r2=1.)
    {
        costheta=std::min(costheta,1.);

        double a0;
        double a1;

        if (std::fabs(AverageDirection[0])<=std::fabs(AverageDirection[1])) {
            a0=AverageDirection[0];
            a1=AverageDirection[1];
        } else {
            a0=AverageDirection[1];
            a1=AverageDirection[0];
        }

        const double a0_a0_m1=1.-a0*a0;
        const double tmp=std::sqrt(std::max(1.-costheta*costheta,0.)/(r2*a0_a0_m1));
        const double tmp_sinphi=tmp*sinphi;
        const double tmp_cosphi=tmp*cosphi;
        const double costheta_p_a0_tmp_sinphi=costheta+a0*tmp_sinphi;

        RandomDirection[0]=a0*costheta-a0_a0_m1*tmp_sinphi;
        RandomDirection[1]=a1*costheta_p_a0_tmp_sinphi+AverageDirection[2]*tmp_cosphi;
        RandomDirection[2]=AverageDirection[2]*costheta_p_a0_tmp_sinphi-a1*tmp_cosphi;

        if (a0!=AverageDirection[0]) std::swap(RandomDirection[0],RandomDirection[1]);
    }

    /// Azimuthal rotation with a trig-based circle pick.
    template<class VecType, class VecType2> inline void RandomAzimuthalRotation3(const VecType& AverageDirection, VecType2& RandomDirection, const double costheta) {
        double cosphi, sinphi;
        PickRandomPointOnUnitCircle(cosphi, sinphi);
        Rotate(AverageDirection, RandomDirection, sinphi, cosphi, costheta);
    }

    /// Azimuthal rotation with rejection-sampled azimuth (passes the
    /// squared radius r2 through to Rotate instead of normalizing).
    template<class VecType, class VecType2> inline void RandomAzimuthalRotation(const VecType& AverageDirection, VecType2& RandomDirection, const double costheta) {
        double cosphi, sinphi;
        double r2;
        do {
            cosphi=RandomNumber()-0.5;
            sinphi=RandomNumber()-0.5;
            r2=cosphi*cosphi+sinphi*sinphi;
        } while (r2>=0.25 || r2<=epsilon) ;
        Rotate(AverageDirection, RandomDirection, sinphi, cosphi, costheta, r2);
    }

    /// Azimuthal rotation built from a random sphere point projected off
    /// the average direction.  (Definition continues in the next chunk.)
    template<class VecType, class VecType2> inline void RandomAzimuthalRotation2(const VecType& AverageDirection, VecType2& RandomDirection, const double costheta) {
        double tmp[3];
        double dot;
        do {
            PickRandomPointOnUnitSphere(tmp[0],tmp[1], tmp[2]);
            dot=AverageDirection[0]*tmp[0]+AverageDirection[1]*tmp[1]+AverageDirection[2]*tmp[2];
        } while (dot>=1.);
        double r=std::sqrt((1-costheta*costheta)/(1-dot*dot));
        RandomDirection[0]=AverageDirection[0]*costheta+(tmp[0]-AverageDirection[0]*dot)*r;
        RandomDirection[1]=AverageDirection[1]*costheta+(tmp[1]-AverageDirection[1]*dot)*r;
        RandomDirection[2]=AverageDirection[2]*costheta+(tmp[2]-AverageDirection[2]*dot)*r;
    }

    /// Random direction distributed as cos^N(theta) about AverageDirection.
    template<class VecType, class VecType2> inline void CosineNDistributedRandomDirection(double N, const VecType& AverageDirection, VecType2& RandomDirection) {
        double costheta=PowerCosineSineDistributionReturnCosTheta(N);
        RandomAzimuthalRotation(AverageDirection, RandomDirection, costheta);
    }

    /// As above, but rejects polar angles beyond acos(cos_cutoff_angle).
    template<class VecType, class VecType2> inline void CosineNDistributedRandomDirection(double N, const VecType& AverageDirection, VecType2& RandomDirection, double cos_cutoff_angle) {
        double costheta;
        do {
            costheta=PowerCosineSineDistributionReturnCosTheta(N);
        } while (costheta<cos_cutoff_angle);
        RandomAzimuthalRotation(AverageDirection, RandomDirection, costheta);
    }

    /// Cosine (N=1) distributed direction: a uniform sphere point added to
    /// the average direction and renormalized; the cutoff rejects vectors
    /// whose norm is below twice_cos_cutoff_angle.
    template<class VecType, class VecType2> inline void Cosine1DistributedRandomDirection(const VecType& AverageDirection, VecType2& RandomDirection, double twice_cos_cutoff_angle=0.)
    {
        double tmp;
        do {
            PickRandomPointOnUnitSphere(RandomDirection[0], RandomDirection[1], RandomDirection[2]);
            RandomDirection[0]+=AverageDirection[0];
            RandomDirection[1]+=AverageDirection[1];
            RandomDirection[2]+=AverageDirection[2];
            tmp=std::sqrt(RandomDirection[0]*RandomDirection[0]+RandomDirection[1]*RandomDirection[1]+RandomDirection[2]*RandomDirection[2]);
        } while (tmp<=twice_cos_cutoff_angle);
        RandomDirection[0]/=tmp;
        RandomDirection[1]/=tmp;
        RandomDirection[2]/=tmp;
    }

    /// Direction sampled from the cone cosine-sine distribution with
    /// half-angle `Angle`.
    template<class VecType, class VecType2> inline void CosAngleDistributedRandomDirection(double Angle,const VecType& AverageDirection, VecType2& RandomDirection) {
        double costheta=std::cos(ConeCosineSineDistributionReturnTheta(Angle));
        RandomAzimuthalRotation(AverageDirection, RandomDirection, costheta);
    }

    /// Gaussian-profile start position (Box-Muller in polar form) in the
    /// plane perpendicular to `dir`, centered at `center`, with the given
    /// full width at half maximum.
    template<class VecType1, class VecType2, class VecType3, class ValType> inline void NormalDistributedStartPosition(const VecType1& center, const VecType2& dir, ValType FWHM, VecType3& position) {
        static const double fac=-std::log(16.);     // FWHM -> variance conversion
        double v0,v1,rsq;
        do {
            v0=2.0*RandomNumber()-1.0;
            v1=2.0*RandomNumber()-1.0;
            rsq=v0*v0+v1*v1;
        } while (rsq>=1.0 || rsq<1e-20);
        Rotate(dir, position,v0,v1, 0.,rsq);
        rsq=std::sqrt(std::log(rsq)/(rsq*fac))*FWHM;
        for (int k=0;k<3;k++) {
            position[k]*=rsq;
            position[k]+=center[k];
        }
    }

    /// Variant of the above with an (unused) Parameter argument and a
    /// slightly different radius scaling (no 1/rsq factor inside the sqrt).
    template<class VecType1, class VecType2, class VecType3, class ValType, class ParameterType> inline void NormalDistributedStartPosition2(const VecType1& center, const VecType2& dir, ValType FWHM, VecType3& position, const ParameterType& Parameter) {
        static const double fac=-std::log(16.);
        double v0,v1,rsq;
        do {
            v0=2.0*RandomNumber()-1.0;
            v1=2.0*RandomNumber()-1.0;
            rsq=v0*v0+v1*v1;
        } while (rsq>=1.0 || rsq<1e-20);
        Rotate(dir, position,v0,v1, 0.,rsq);
        rsq=std::sqrt(std::log(rsq)/(fac))*FWHM;
        for (int k=0;k<3;k++) {
            position[k]*=rsq;
            position[k]+=center[k];
        }
    }

    /// Lorentz(Cauchy)-profile start position in the plane perpendicular to
    /// the open boundary; the open-boundary coordinate stays at `center`.
    template<class VecType1, class VecType2, class VecType3, class ValType, class ParameterType> inline void LorentzDistributedStartPosition(const VecType1& center, const VecType2& dir, ValType FWHM, VecType3& position, const ParameterType& Parameter) {
        int x=4;    // sentinel: 4 means "first in-plane axis not yet assigned"
//      int y;
        double radius;
        radius=FWHM/2*std::sqrt(exp(my::math::Pi*RandomNumber())-1);
        double theta;
        theta = 2*my::math::Pi*RandomNumber();
        for (int i=0;i<3;i++) {
            if (Parameter.open_boundary==i) {
                position[i]=center[i];
            } else {
                if (x==4) {
                    x=i;
                    position[i]=radius*std::cos(theta)+center[i];
                    //position[i]=radius*std::cos(theta)+center[i];
                } else {
//                  y=i;
                    position[i]=radius*std::sin(theta)+center[i];
                    //position[i]=radius*std::sin(theta)+center[i];
                }
            }
        }
    }

    /// Start position drawn from a surface-charge-density-like radial
    /// profile, scaled with 5e-9/distance (units presumably meters —
    /// TODO confirm against callers).
    template<class VecType1, class VecType2, class VecType3, class ValType, class ParameterType> inline void SurfaceChargeDensityDistributedStartPosition(const VecType1& center, const VecType2& dir, ValType FWHM, VecType3& position, const ParameterType& Parameter, double distance) {
        int x=4, y=4;
        double radius, randnum;
        randnum=RandomNumber();
        radius=2*FWHM*std::sqrt((1/(1-randnum))*(1/(1-randnum))-1)/3;
        radius*=5e-9/distance;
        double theta;
        theta = 2*my::math::Pi*RandomNumber();
        for (int i=0;i<3;i++) {
            if (Parameter.open_boundary==i) {
                position[i]=center[i];
            } else {
                if (x==4) {
                    x=i;
                    position[i]=radius*std::cos(theta)+center[i];
                } else {
                    y=i;
                    position[i]=radius*std::sin(theta)+center[i];
                }
            }
        }
    }

    /// As above without the distance scaling; returns the sampled radius.
    /// (Definition continues in the next chunk.)
    template<class VecType1, class VecType2, class VecType3, class ValType, class ParameterType> inline double SurfaceChargeDensityDistributedStartPosition(const VecType1& center, const VecType2& dir, ValType FWHM, VecType3& position, const ParameterType& Parameter) {
        int x=4;
//      int y;
        double radius, randnum;
        randnum=RandomNumber();
        radius=2*FWHM*std::sqrt((1/(1-randnum))*(1/(1-randnum))-1)/3;
        double theta;
        theta = 2*my::math::Pi*RandomNumber();
        for (int i=0;i<3;i++) {
            if (Parameter.open_boundary==i) {
                position[i]=center[i];
            } else {
                if (x==4) {
                    x=i;
                    position[i]=radius*std::cos(theta)+center[i];
                    //position[i]=radius*std::cos(theta)+center[i];
                } else {
//                  y=i;
                    position[i]=radius*std::sin(theta)+center[i];
                    //position[i]=radius*std::sin(theta)+center[i];
                }
            }
        }
        return radius;
    }

    /// Rejection-samples a position on the open boundary plane according to
    /// the surface charge density induced by a point charge at `center`.
    /// NOTE(review): when Parameter.open_boundary != 0 the first loop
    /// evaluates `d = d` before `d` is ever assigned — an uninitialized
    /// read (undefined behavior); confirm intent before relying on this.
    template<class VecType1, class VecType2, class VecType3, class ValType, class ParameterType, class PartitionType> inline void SurfaceChargeDensityDistribution(const VecType1& center, const VecType2& dir, ValType voltage, VecType3& position, const ParameterType& Parameter, const PartitionType& Partition) {
        bool keep;
        double d;   // distance from charge to the open boundary plane
        for (int i=0;i<3;i++) d=(Parameter.open_boundary==i)?(center[i]-(Partition.Max(i)-1)*Parameter.grid_delta):d;
        do {
            for (int i=0;i<3;i++) position[i]=(Parameter.open_boundary==i)?center[i]:RandomNumber()*Partition.Extension(i)*Parameter.grid_delta;
            double PositionSumSquares=0;
            for (int i=0;i<3;i++) PositionSumSquares+=(Parameter.open_boundary!=i)?(position[i]-center[i])*(position[i]-center[i]):0;
            double scd;
            // sigma(r) = q*d / (2*pi*(r^2+d^2)^(3/2)), q = 1.6e-19 C
            scd = (d*1.6e-19)/(2*(my::math::Pi)*(PositionSumSquares+d*d)*sqrt(PositionSumSquares+d*d));
            double scd_max;
            scd_max = (1.6e-19)/(2*(my::math::Pi)*(d*d));   // density at r=0
            double randomnu;
            randomnu=(scd_max)*RandomNumber();
            keep=scd<randomnu;      // accept when scd >= uniform*scd_max
        } while (keep);
    }

    /// Multi-charge variant: rejection-samples against the summed surface
    /// charge density of the charges given in `positions`/`charges`.
    /// NOTE(review): d and position_max are hard-coded calibration values.
    template<class VecType1, class VecType2, class ValType, class ParameterType, class PartitionType> inline void SurfaceChargeDensityDistribution(const VecType1& dir, ValType voltage, VecType2& position, const ParameterType& Parameter, const PartitionType& Partition, const std::vector<double>& positions, const std::vector<double>& charges) {
        //const std::vector<double, std::allocator<double> >&, const std::vector<double, std::allocator<double> >&
        bool keep;
        double d=3.7;//e-9;
        double position_max[3];
        position_max[0]=21.5;//25;
        position_max[1]=43.7;
        position_max[2]=20.;
        do {
            double scd=0;
            double scd_max=0;
            for (int i=0;i<3;i++) position[i]=(Parameter.open_boundary==i)?((Partition.Max(i)-1)*Parameter.grid_delta):RandomNumber()*Partition.Extension(i)*Parameter.grid_delta;
//          position_max[0]=23;//25;
//          position_max[1]=43.7;
//          position_max[2]=20;
            for (unsigned int i=0;i<charges.size();++i){
                double PositionSumSquares=0;
                for (int j=0;j<3;j++) PositionSumSquares+=(Parameter.open_boundary!=j)?(position[j]-positions[3*i+j])*(position[j]-positions[3*i+j]):0;
                double PositionSumSquaresMax=0;
                for (int j=0;j<3;j++) PositionSumSquaresMax+=(Parameter.open_boundary!=j)?(position_max[j]-positions[3*i+j])*(position_max[j]-positions[3*i+j]):0;
                scd += (d*charges[i]*1.6e-19)/(2*(my::math::Pi)*(PositionSumSquares+d*d)*sqrt(PositionSumSquares+d*d));
                scd_max += (d*charges[i]*1.6e-19)/(2*(my::math::Pi)*(PositionSumSquaresMax+d*d)*sqrt(PositionSumSquaresMax+d*d));
            }
            double randomnu;
            randomnu=(scd_max)*RandomNumber()*1.2;  // 1.2 envelope safety factor
            keep=scd<randomnu;
        } while (keep);
    }

    /// Samples a start position on a rotated nanowire: either along the
    /// line segment (Cauchy-like transverse profile) or on the circular
    /// end caps.  Returns the sampled transverse radius.
    /// NOTE(review): EndPosition, dir and Height are not used in the body.
    template<class VecType1, class VecType2, class VecType3, class VecType4, class ValType, class ParameterType> inline double NanowireSurfaceCharge(const VecType1& StartPosition, const VecType2 EndPosition, const VecType3& dir, ValType FWHM, ValType Length, ValType Height, ValType Angle, VecType4& position, const ParameterType& Parameter) {
        double volume_sides, volume_line;
        volume_sides = 1;//FWHM/3;//2*FWHM/3;
        volume_line = Length*3/(my::math::Pi*FWHM);
        double line_or_sides;
        line_or_sides=(volume_sides+volume_line)*RandomNumber();
        double X,Y;
        int x=4, y=4;   // 4 = "in-plane axis not yet assigned"
        for (int i=0;i<3;i++) {
            if (Parameter.open_boundary!=i) {
                if (x==4) {
                    x=i;
                } else {
                    y=i;
                }
            }
        }
        //double d=3.7e-9;
        //double d=106.286545976e-9;
        double radius=0;
        if (line_or_sides <= volume_line) {
            // generate a particle in the line
            double randx, randy;
            randx=RandomNumber()-0.5;
//          double radius;
            radius=2*FWHM*randx/(3*sqrt(1-4*randx*randx));
            //radius=2*2*FWHM*randx/(3*sqrt(1-4*randx*randx));
            X = radius;
            randy=RandomNumber();
            Y = Length*randy;
        } else {
            // generate a particle on the sides
            double randnum=RandomNumber();
//          double radius;
            radius=FWHM*std::sqrt((1/(1-randnum))*(1/(1-randnum))-1)/3;
            //radius=2*FWHM*std::sqrt((1/(1-randnum))*(1/(1-randnum))-1)/3;
            double theta;
            theta = 2*my::math::Pi*RandomNumber();
            X=radius*std::cos(theta);
            Y=radius*std::sin(theta);
            Y += Y>0?Length:0;      // attach to the far end cap
            // radius=2*radius*radius;
        }
        // Rotate (X,Y) by -Angle and translate to StartPosition.
        for (int i=0;i<3;i++) {
            if (i==x) {
                position[i] = cos(-Angle)*(X) - sin(-Angle)*(Y) + StartPosition[i];
            } else if (i==y) {
                position[i] = sin(-Angle)*(X) + cos(-Angle)*(Y) + StartPosition[i];
            } else {
                position[i] = StartPosition[i];
            }
        }
        return radius;
    }

    /// Hard-coded start-position sampler for the junctionless-transistor
    /// test structure: three doped boxes plus a gate line, with weights
    /// proportional to the listed areas (total 77.22).
    template<class VecType1, class ParameterType, class PartitionType> inline void Junctionless(VecType1& position, const ParameterType& Parameter, const PartitionType& Partition) {
        double where = 77.22*RandomNumber();
        double X,Z;
        if (where < 25){ //Box 1
            X = 5000*RandomNumber()+10000;
            Z = 5000*RandomNumber()+5000;
        } else if (where < 50) { //Box 2
            X = 5000*RandomNumber();
            Z = 5000*RandomNumber();
        } else if (where < 75) { //Box 3
            X = 5000*RandomNumber();
            Z = 5000*RandomNumber()+10000;
        } else { //Gate line
            X = 7400*RandomNumber()+2600;
            Z = 300*RandomNumber()+7350;
        }
        position[0]=X;
        position[1]=100;
        position[2]=Z;
    }

    /// Rejection sampler for the surface charge density of a charged
    /// nanowire segment on the open boundary plane.
    /// (Definition continues past the end of this chunk.)
    template<class VecType1, class VecType2, class VecType3, class VecType4, class ValType, class ParameterType, class PartitionType> inline void NanowireSurfaceChargeDistribution(const VecType1& StartPosition, const VecType2 EndPosition, const VecType3& dir, ValType FWHM, ValType Length, ValType Angle, VecType4& position, const ParameterType& Parameter, const PartitionType& Partition) {
        bool keep;
        double d=0;
        int x=4;
        int y=4;
        for (int i=0;i<3;i++) d=(Parameter.open_boundary==i)?(StartPosition[i]-(Partition.Max(i)-1)*Parameter.grid_delta):d;
        double scd_max;
        scd_max = (1.6e-19)/(2*(my::math::Pi)*(d*d));
        do {
            for (int i=0;i<3;i++) {
                if (Parameter.open_boundary==i){
                    position[i]=StartPosition[i];
                } else {
                    position[i]=RandomNumber()*Partition.Extension(i)*Parameter.grid_delta;
                    if (x==4) { //if x has not yet been assigned
                        x=i;
                    } else { //x has been assigned, now assign y
                        y=i;
                    }
                }
            }
            double alpha;   //alpha is the angle the line makes with the horizontal direction
            if ((EndPosition[x]-StartPosition[x])==0) {
                if ((EndPosition[y]-StartPosition[y]) > 0) {
                    alpha=my::math::Pi/2;
                } else
{//if ((EndPosition[2]-StartPosition[2]) < 0) { alpha=-my::math::Pi/2; } } else { alpha=std::atan((EndPosition[y]-StartPosition[y])/(EndPosition[x]-StartPosition[x])); if ((EndPosition[x]-StartPosition[x])<0) alpha+=my::math::Pi; } double beta; //beta is the angle between start position and position(); if ((position[x]-StartPosition[x])==0) { if ((position[y]-StartPosition[y]) > 0) { beta=my::math::Pi/2; } else if ((position[y]-StartPosition[y]) < 0) { beta=-my::math::Pi/2; } else { //(position[0]-StartPosition[0]) and (position[2]-StartPosition[2]) beta=alpha; } } else { beta=std::atan((position[y]-StartPosition[y])/(position[x]-StartPosition[x])); if ((position[x]-StartPosition[x])<0) beta+=my::math::Pi; } double hypotenuse_sq; hypotenuse_sq = (position[y]-StartPosition[y])*(position[y]-StartPosition[y])+(position[x]-StartPosition[x])*(position[x]-StartPosition[x]); double PositionSumSquares; if (beta<(alpha-my::math::Pi/2) || beta>(alpha+my::math::Pi/2)) { PositionSumSquares=hypotenuse_sq; } else { double theta; theta=beta-alpha; double DirectionalLength; DirectionalLength=std::sqrt(hypotenuse_sq)*std::cos(theta); if (DirectionalLength > Length) { PositionSumSquares = (position[y]-EndPosition[y])*(position[y]-EndPosition[y])+(position[x]-EndPosition[x])*(position[x]-EndPosition[x]); } else { PositionSumSquares = hypotenuse_sq*std::sin(theta)*std::sin(theta); } } double scd; scd = (d*1.6e-19)/(2*(my::math::Pi)*(PositionSumSquares+d*d)*sqrt(PositionSumSquares+d*d)); double randomnu; randomnu=(scd_max)*RandomNumber(); keep=scd<randomnu; } while (keep); } template<class VecType1, class VecType2, class VecType3, class VecType4, class ValType, class ParameterType, class PartitionType> inline void NanowireLorentzDistribution(const VecType1& StartPosition, const VecType2 EndPosition, const VecType3& dir, ValType FWHM, ValType Length, ValType Angle, VecType4& position, const ParameterType& Parameter, const PartitionType& Partition) { bool keep; int x=4; int y=4; double 
sigma; sigma=FWHM/2; double cauchy_max; cauchy_max=1/(my::math::Pi*sigma); do { for (int i=0;i<3;i++) { if (Parameter.open_boundary==i){ position[i]=StartPosition[i]; } else { position[i]=RandomNumber()*Partition.Extension(i)*Parameter.grid_delta; if (x==4) { //if x has not yet been assigned x=i; } else { //x has been assigned, now assign y y=i; } } } double alpha; //apha is the angle the line makes with the horizontal direction if ((EndPosition[x]-StartPosition[x])==0) { if ((EndPosition[y]-StartPosition[y]) > 0) { alpha=my::math::Pi/2; } else {//if ((EndPosition[2]-StartPosition[2]) < 0) { alpha=-my::math::Pi/2; } } else { alpha=std::atan((EndPosition[y]-StartPosition[y])/(EndPosition[x]-StartPosition[x])); if ((EndPosition[x]-StartPosition[x])<0) alpha+=my::math::Pi; } double beta; //beta is the angle between start position and position(); if ((position[x]-StartPosition[x])==0) { if ((position[y]-StartPosition[y]) > 0) { beta=my::math::Pi/2; } else if ((position[y]-StartPosition[y]) < 0) { beta=-my::math::Pi/2; } else { //(position[0]-StartPosition[0])==0 and (position[2]-StartPosition[2])==0 beta=alpha; } } else { beta=std::atan((position[y]-StartPosition[y])/(position[x]-StartPosition[x])); if ((position[x]-StartPosition[x])<0) beta+=my::math::Pi; } double hypotenuse_sq; hypotenuse_sq = (position[y]-StartPosition[y])*(position[y]-StartPosition[y])+(position[x]-StartPosition[x])*(position[x]-StartPosition[x]); double rc; if (beta<(alpha-my::math::Pi/2) || beta>(alpha+my::math::Pi/2)) { rc=hypotenuse_sq; } else { double theta; theta=beta-alpha; double DirectionalLength; DirectionalLength=std::sqrt(hypotenuse_sq)*std::cos(theta); if (DirectionalLength > Length) { rc = (position[y]-EndPosition[y])*(position[y]-EndPosition[y])+(position[x]-EndPosition[x])*(position[x]-EndPosition[x]); } else { rc = hypotenuse_sq*std::sin(theta)*std::sin(theta); } } double cauchy; cauchy=1/(my::math::Pi*sigma*(1+(rc/(sigma*sigma)))); double randomnu; 
randomnu=(cauchy_max)*RandomNumber(); keep=cauchy<randomnu; } while (keep); } //using alternative Monte Carlo template<class VecType1, class VecType2, class VecType3, class VecType4, class ValType, class ParameterType> inline void NanowireLorentzDistribution2(const VecType1& StartPosition, const VecType2 EndPosition, const VecType3& dir, ValType FWHM, ValType Length, ValType Height, ValType Angle, VecType4& position, const ParameterType& Parameter) { double volume_sides, volume_line; volume_sides = 2*Height*(FWHM/2)*(FWHM/2)*(my::math::Pi)*(my::math::Pi); volume_line = Height*(my::math::Pi)*(FWHM/2)*Length; double line_or_sides; line_or_sides=(volume_sides+volume_line)*RandomNumber(); double X,Y; int x=4, y=4; for (int i=0;i<3;i++) { if (Parameter.open_boundary!=i) { if (x==4) { x=i; } else { y=i; } } } if (line_or_sides <= volume_line) { // generate a particle in the line double randx, randy; randx=RandomNumber()-0.5; double radius; radius=std::tan(my::math::Pi*randx); X = FWHM/2*radius; randy=RandomNumber(); Y = Length*randy; } else { // generate a particle on the sides // double rannum; // rannum=RandomNumber(); double radius; //radius=std::sqrt(exp(my::math::Pi*RandomNumber())-1); radius=FWHM/2*std::sqrt(std::abs(exp(2*my::math::Pi*RandomNumber()))-1); double theta; theta = 2*my::math::Pi*RandomNumber(); X=radius*std::cos(theta); Y=radius*std::sin(theta); Y += Y>0?Length:0; } for (int i=0;i<3;i++) { if (i==x) { position[i] = cos(-Angle)*(X) - sin(-Angle)*(Y) + StartPosition[i]; } else if (i==y) { position[i] = sin(-Angle)*(X) + cos(-Angle)*(Y) + StartPosition[i]; } else { position[i] = StartPosition[i]; } } } template<class DropletType, class VecType1, class VecType2, class ParameterType, class PartitionType> inline void ESDDistribution(const DropletType& d, const VecType1& StartPosition, VecType2& Position, double& r, double& q, long double* Velocity, const ParameterType& Parameter, const PartitionType& Partition){ double d_test_v; double d_test_r; 
//----------------Find the radius distribution---------------------------------- double volume_fraction=0.42*RandomNumber()+0.58; double r_min_inv=1/2.5e-6; double r_max_inv=1/55e-6; double r_max_inv_third=exp(log(r_max_inv)/3); double r_min_inv_third=exp(log(r_min_inv)/3); double radius=1/pow(volume_fraction*(r_max_inv_third-r_min_inv_third)+r_min_inv_third,3.); double eta_a = 2.2e-5; //Ns/m2 double rho_d = 789; // kg/m3 //------------------------------------------------------------------------------ //----------------Calculate the charge given r---------------------------------- double gamma_d = 0.022; double permittivity = 8.854187817e-12; double qd=0.58*8*my::math::Pi*sqrt(gamma_d*permittivity*radius*radius*radius); //------------------------------------------------------------------------------ //----------------Find the initial droplet position (cylidrical)---------------- long double theta_i=0.5*my::math::Pi*RandomNumber()*0.5; // 45 degree spray cone long double phi=2*my::math::Pi*RandomNumber(); double location_radius=0.5; double z_star1 = 1-location_radius*cos(theta_i); // height double r_star1 = location_radius*sin(theta_i); // radius double mass = 4*my::math::Pi*rho_d*radius*radius*radius/3; //---FIND THE INITIAL AND THERMALDROPLET ELECTRICAL FORCES AND ACCELLERATIONS--- double H = StartPosition[Parameter.open_boundary]; //270 mm or 0.27 m double Phi_0 = 10e3; //V double R = 1e-3; //outer radius of the nozzle (guess) double K_V = 1-exp(-0.021*H/R); //non-dimensional related to H/R ratio double Phi_star = K_V/(log(4*H/R)); double E_e = Phi_0*Phi_star/H; //-----------Find expected initial electric force when E-field acts alone-------- double plusz1 = 1+z_star1;//1+z_star; double minusz1 = 1-z_star1;//1-z_star; double rootplusz1 = std::sqrt(r_star1*r_star1+plusz1*plusz1); double rootminusz1 = std::sqrt(r_star1*r_star1+minusz1*minusz1); double E_v1 = E_e*(1/rootminusz1+1/rootplusz1); double E_r1 = E_e*(plusz1/rootplusz1-minusz1/rootminusz1)/r_star1; double 
theta=atan(E_r1/E_v1); //-----------Find expected final electric force where E-field acts alone--------- double t_heat = 10e-3; double r_star2 = (E_r1*(H-t_heat))/E_v1; double z_star2 = 0;//t_heat/H; double plusz2 = 1+z_star2;//1+z_star; double minusz2 = 1-z_star2;//1-z_star; double rootplusz2 = std::sqrt(r_star2*r_star2+plusz2*plusz2); double rootminusz2 = std::sqrt(r_star2*r_star2+minusz2*minusz2); double Eth_v2 = E_e*(1/rootminusz2+1/rootplusz2); double Eth_r2 = E_e*(plusz2/rootplusz2-minusz2/rootminusz2)/r_star2; //-----------Use the first and second to come up with linear dependence---------- double E_v = (E_v1-Eth_v2)/(H-t_heat); double E_r = (theta<1e-20)?0:(E_r1-Eth_r2)/((H-t_heat)*tan(theta)); double Fe_v = qd*E_v; double Fe_r = qd*E_r; double ae_v = Fe_v/mass; // initial dependent component double ae_r = Fe_r/mass; // initial dependent component //------------Calculate the required constant component of electric force-------- double Fe_v1 = qd*E_v1; double Fe_r1 = qd*E_r1; double ae_v1 = Fe_v1/mass; // Initial constant component double ae_r1 = (theta<1e-20)?0:Fe_r1/mass; // Initial constant component //------------------------------------------------------------------------------- //------------------------------------------------------------------------------ //--------------Find all forces acting on the droplet--------------------------- // Find the droplet mass - knowns: radius, rho_d // Gravity force component acceleration double g = 9.81; //m/s // Stokes force component acceleration - knowns: rho_d, eta_a double s_f = (4.5*eta_a)/(rho_d*radius*radius); // Electric force component acceleration - knowns: q_d, r_star, z_star; double d0_v = 0; double d0_r = 0; double v_0 = 0; double v0_v = v_0*cos(theta); double v0_r = v_0*sin(theta); // This is for both dimensions //------------------------------------------------------------------------------ //--------------Find new position of droplet after t---------------------------- //First, separate the forces 
due to velocity/displacement dependences double a_v = g+ae_v1;//+2*a_e; // independent acceleration (mainly gravity) - vertical only double b = s_f; // velocity dependent acceleration - Stokes force - vert and rad double c_v = ae_v; // displacement dependent acceleration - vertical E-force double a_r = ae_r1; //+2*a_e; // independent acceleration - a_e component double c_r = ae_r; // displacement dependent acceleration - radial E-force //--------------Start vertical onlgoings --------------------------------------- long double t_drop; double v0th_v;//=0; double v0th_r;//=0; if (b*b-4*c_v<0) { t_drop=0; d_test_v = H-t_heat; d_test_r = r_star1*H; v0th_v=1; v0th_r=0; } else { double r1_v = (-b+std::sqrt(b*b-4*c_v))/(2); double r2_v = (-b-std::sqrt(b*b-4*c_v))/(2); double B1_v = d0_v*(r2_v)/(r2_v-r1_v); double A1_v = d0_v-B1_v; double B2_v = (v0_v+d0_v)/(r2_v-r1_v); double A2_v = -B2_v;; double C_v = a_v/(r1_v*r2_v); double B3_v = -a_v/(r2_v*(r2_v-r1_v)); double A3_v = -(B3_v+C_v); double iteration_stop=10000; double t_low = 0; double t_high = 1; double t_check = (t_low+t_high)/2; while (true){ d_test_v = (A1_v+A2_v+A3_v)*exp(r1_v*t_check)+(B1_v+B2_v+B3_v)*exp(r2_v*t_check)+C_v; if (t_high-t_low<1e-100) break; if (d_test_v > (H-t_heat)) { t_high=t_check; t_check=(t_high+t_low)/2; } else if (d_test_v < (H-t_heat)) { t_low=t_check; t_check=(t_high+t_low)/2; } if (iteration_stop==0) break; iteration_stop--; } t_drop=t_check; v0th_v = (A1_v+A2_v+A3_v)*(exp(r1_v*t_drop)-1)/r1_v+(B1_v+B2_v+B3_v)*(exp(r2_v*t_drop)-1)/r2_v+C_v*t_drop+v0_v; //---------Now time required to reach heat zone is known------------------------ //---------Can now calculate the radial displacement---------------------------- if (b*b-4*c_r<0) { t_drop=0; d_test_r = 0; v0th_r = 0; } else { double r1_r = (-b+std::sqrt(b*b-4*c_r))/(2); double r2_r = (-b-std::sqrt(b*b-4*c_r))/(2); double B1_r = d0_r*(r2_r)/(r2_r-r1_r); double A1_r = d0_r-B1_r; double B2_r = (v0_r+d0_r)/(r2_r-r1_r); double A2_r = -B2_r;; 
double C_r = a_r/(r1_r*r2_r); double B3_r = -a_r/(r2_r*(r2_r-r1_r)); double A3_r = -(B3_r+C_r); d_test_r = (A1_r+A2_r+A3_r)*exp(r1_r*t_drop)+(B1_r+B2_r+B3_r)*exp(r2_r*t_drop)+C_r; v0th_r = (A1_r+A2_r+A3_r)*(exp(r1_r*t_drop)-1)/r1_r+(B1_r+B2_r+B3_r)*(exp(r2_r*t_drop)-1)/r2_r+C_r*t_drop+v0_r; } } //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------ THERMAL ZONE CALCULATIONS ----------------------- //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // //---------Now we know z and r on the cusp of the thermal zone------------------ // //---------Reset parameters to include thermal components and re-run ----------- //--------CALCULATE THERMAL EFFECTS IN DROPLET SIZE REDUCTION------------------- double dth_test_v; double dth_test_r; double q0 = 373e-12; // (373 um^2) for water: 88e-12 (m^2) double q1 = 89.1; // (8.91e-5 /um) for water: 4.3e3 (/m) double del_T = 100000; double dK = q0*del_T*(1+2*q1*radius); double r_new = radius-radius*t_drop*exp(log(dK)/3); r=r_new; double qd_new = 0.58*8*my::math::Pi*sqrt(gamma_d*permittivity*r_new*r_new*r_new); double mass_new = 4*my::math::Pi*rho_d*r_new*r_new*r_new/3; // // //---------CALCULATE THE ENERGIES ASSUMING LINEAR REDUCTION TO FINAL------------ // //---------Electric force F_e is constant in this region and small-------------- // double rth_star = d_test_r/H; //d_r/H; double zth_star = 1-d_test_v/H; //1-d_v/H; double d0th_v = 0;//d_test_v; double d0th_r = 0;//d_test_r; double tth_drop = t_drop; double thplusz = 1+zth_star;//1+z_star; double thminusz = 1-zth_star;//1-z_star; double throotplusz = std::sqrt(rth_star*rth_star+thplusz*thplusz); double throotminusz = std::sqrt(rth_star*rth_star+thminusz*thminusz); 
double Eth_v = E_e*(1/throotminusz+1/throotplusz); double Eth_r = E_e*(thplusz/throotplusz-thminusz/throotminusz)/rth_star; double theta_th = atan(Eth_r/Eth_v); double Feth_v = qd_new*Eth_v; double Feth_r = qd_new*Eth_r; double aeth_v = Feth_v/mass_new; double aeth_r = Feth_r/mass_new; //--------------Find all other forces acting on the droplet--------------------- // Find the droplet mass - knowns: radius, rho_d // Gravity force component acceleration // Stokes force component acceleration - knowns: rho_d, eta_a double sth_f = (4.5*eta_a)/(rho_d*r_new*r_new); // Thermophoretic force double kappa_a = 0.025; double kappa_d = 0.19; double grad_T = 100000; double T = 523; double rho_a = 1.29; double F_th = 3*my::math::Pi*eta_a*eta_a*r_new*3*kappa_a*grad_T/(rho_a*T*(2*kappa_a+kappa_d)); double a_th = F_th/mass_new; // Electric force component acceleration - knowns: q_d, r_star, z_star; double ath_v = g+aeth_v-a_th;//+2*a_e; // independent acceleration (gravity, initial e-force, thermal force) double bth = sth_f; // velocity dependent acceleration - Stokes force double cth_v = aeth_v/t_heat; // displacement dependent acceleration - vertical E-force double ath_r = (theta_th<1e-20)?0:aeth_r; // independent acceleration - Initial electric force double cth_r = (theta_th<1e-20)?0:aeth_r/(t_heat*tan(theta_th)); // displacement dependent acceleration - Radial E-force double v_final_v=0; double v_final_r=0; if (bth*bth-4*cth_v<0) { tth_drop=0; dth_test_v = t_heat; dth_test_r = d_test_r; v_final_v = 1; v_final_r = 0; } else { double r1th_v = (-bth+std::sqrt(bth*bth-4*cth_v))/(2); double r2th_v = (-bth-std::sqrt(bth*bth-4*cth_v))/(2); double B1th_v = d0th_v*(r2th_v)/(r2th_v-r1th_v); double A1th_v = d0th_v-B1th_v; double B2th_v = (v0th_v+d0th_v)/(r2th_v-r1th_v); double A2th_v = -B2th_v;; double Cth_v = ath_v/(r1th_v*r2th_v); double B3th_v = -ath_v/(r2th_v*(r2th_v-r1th_v)); double A3th_v = -(B3th_v+Cth_v); double iteration_stopth =10000; double tth_low = 0; double tth_high = 
t_drop; double tth_check = (tth_low+tth_high)/2; while (true){ dth_test_v = (A1th_v+A2th_v+A3th_v)*exp(r1th_v*tth_check)+(B1th_v+B2th_v+B3th_v)*exp(r2th_v*tth_check)+Cth_v; if (tth_high-tth_low<1e-10) break; if (dth_test_v > t_heat) { tth_high=tth_check; tth_check=(tth_high+tth_low)/2; } else if (dth_test_v < t_heat) { tth_low=tth_check; tth_check=(tth_high+tth_low)/2; } if (iteration_stopth==0) break; iteration_stopth--; } tth_drop = tth_check; v_final_v = (A1th_v+A2th_v+A3th_v)*(exp(r1th_v*tth_drop)-1)/r1th_v+(B1th_v+B2th_v+B3th_v)*(exp(r2th_v*tth_drop)-1)/r2th_v+Cth_v*tth_drop+v0th_v; //---------Now time required to reach surface is known-------------------------- //---------Can now calculate the radial displacement---------------------------- if (bth*bth-4*cth_r<0) { tth_drop=0; dth_test_r = 0; v_final_r = 0; } else { double r1th_r = (-bth+std::sqrt(bth*bth-4*cth_r))/(2); double r2th_r = (-bth-std::sqrt(bth*bth-4*cth_r))/(2); double B1th_r = d0th_r*(r2th_r)/(r2th_r-r1th_r); double A1th_r = d0th_r-B1th_r; double B2th_r = (v0th_r+d0th_r)/(r2th_r-r1th_r); double A2th_r = -B2th_r;; double Cth_r = ath_r/(r1th_r*r2th_r); double B3th_r = -ath_r/(r2th_r*(r2th_r-r1th_r)); double A3th_r = -(B3th_r+Cth_r); dth_test_r = (A1th_r+A2th_r+A3th_r)*exp(r1th_r*tth_drop)+(B1th_r+B2th_r+B3th_r)*exp(r2th_r*tth_drop)+Cth_r; v_final_r = (A1th_r+A2th_r+A3th_r)*(exp(r1th_r*tth_drop)-1)/r1th_r+(B1th_r+B2th_r+B3th_r)*(exp(r2th_r*tth_drop)-1)/r2th_r+Cth_r*tth_drop+v0th_r; } } Velocity[0] = v_final_r*cos(phi); Velocity[1] = -v_final_v; Velocity[2] = v_final_r*sin(phi); Position[0] = std::sqrt(dth_test_r+d_test_r)*cos(phi)+StartPosition[0]; Position[1] = 0; Position[2] = std::sqrt(dth_test_r+d_test_r)*sin(phi)+StartPosition[2]; r = r_new; q = qd_new; } template<class DropletType, class VecType1, class VecType2, class ParameterType, class PartitionType> inline void EvenlyDistributed(const DropletType& d, const VecType1& StartPosition, VecType2& Position, double& r, double& q, long double* 
Velocity, const ParameterType& Parameter, const PartitionType& Partition){ Velocity[0] = 0; Velocity[1] = -1; Velocity[2] = 0; double volume_fraction=0.22*RandomNumber()+0.58; double r_min_inv=1/2.5e-6; double r_max_inv=1/55e-6; double r_max_inv_third=exp(log(r_max_inv)/3); double r_min_inv_third=exp(log(r_min_inv)/3); double radius=1/pow(volume_fraction*(r_max_inv_third-r_min_inv_third)+r_min_inv_third,3.); r = radius; Position[0]=RandomNumber()*4*(Partition.Max(0)*Parameter.grid_delta)-2*(Partition.Max(0)*Parameter.grid_delta); Position[1]=0; Position[2]=RandomNumber()*4*(Partition.Max(2)*Parameter.grid_delta)-2*(Partition.Max(2)*Parameter.grid_delta); double gamma_d = 0.022; double permittivity = 8.854187817e-12; double qd=0.58*8*my::math::Pi*sqrt(gamma_d*permittivity*radius*radius*radius); q = qd; } template<class VecType1, class VecType2, class ParameterType, class PartitionType> inline void EvenlyDistributed(const VecType1& StartPosition, VecType2& Position, const ParameterType& Parameter, const PartitionType& Partition){ Position[0]=RandomNumber()*(Partition.Max(0)*Parameter.grid_delta-Partition.Min(0)*Parameter.grid_delta)+Partition.Min(0)*Parameter.grid_delta; Position[1]=0; Position[2]=RandomNumber()*(Partition.Max(2)*Parameter.grid_delta-Partition.Min(2)*Parameter.grid_delta)+Partition.Min(2)*Parameter.grid_delta; } template<class DropletType, class VecType1> inline void DiskDistribution(const DropletType d, VecType1& position) { double v0, v1, rsq; do { v0=2.0*RandomNumber()-1.0; v1=2.0*RandomNumber()-1.0; rsq=v0*v0+v1*v1; } while (rsq>=1.0 || rsq<1e-20); position[0] = 2*d.Radius*v0+d.Position[0]; position[1] = 0;//d.Position[1]; position[2] = 2*d.Radius*v1+d.Position[2]; } template<class DataType> bool AnyElement(typename std::vector<DataType> vec, DataType check){ typename std::vector<DataType>::iterator first = vec.begin(), last = vec.end(); while(first!=last){ if(*first==check) return true; ++first; } return false; } } } #endif //DEF_STATISTICS
/* ==== file: spmm_csr_mat.c ==== */
/* This file is part of ParTI!. ParTI! is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. ParTI! is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with ParTI!. If not, see <http://www.gnu.org/licenses/>. */ #include <stdio.h> #include <stdlib.h> #include <getopt.h> #include <HiParTI.h> int main(int argc, char * const argv[]) { char * mm_filename = NULL; FILE *fi = NULL, *fo = NULL; ptiSparseMatrix spA; ptiSparseMatrixCSR csrA; ptiMatrix B, C; ptiIndex R = 16; int niters = 5; ptiTimer timer; ptiNewTimer(&timer, 0); /* OpenMP */ int cuda_dev_id = -2; int nthreads = 1; // get from OMP_NUM_THREADS environment static struct option long_options[] = { {"input", required_argument, 0, 'i'}, {"output", optional_argument, 0, 'o'}, {"R", optional_argument, 0, 'r'}, {"cuda-dev-id", optional_argument, 0, 'd'}, {0, 0, 0, 0} }; for(;;) { int option_index = 0; int c = 1; c = getopt_long(argc, argv, "i:o:r:d:", long_options, &option_index); if(c == -1) { break; } switch(c) { case 'i': mm_filename = optarg; fi = fopen(optarg, "r"); ptiAssert(fi != NULL); break; case 'o': fo = fopen(optarg, "w"); ptiAssert(fo != NULL); break; case 'r': sscanf(optarg, "%"HIPARTI_SCN_INDEX, &R); break; case 'd': sscanf(optarg, "%d", &cuda_dev_id); break; default: abort(); } } printf("B ncols: %d\n", R); printf("niters: %d\n", niters); printf("cuda_dev_id: %d\n", cuda_dev_id); if(cuda_dev_id == -1) { #ifdef HIPARTI_USE_OPENMP #pragma omp parallel nthreads = omp_get_num_threads(); #endif printf("nthreads: %d\n", nthreads); } if(optind > argc || argc < 2) { printf("Usage: %s\n", 
argv[0]); printf("Options: -i INPUT, --input=INPUT\n"); printf(" -o OUTPUT, --output=OUTPUT\n"); printf(" -R RANK\n"); printf(" -d CUDA_DEV_ID, --cuda-dev-id=DEV_ID\n"); printf("\n"); return 1; } printf("Reading sparse matrix from file (%s) ...",mm_filename); fflush(stdout); ptiAssert(ptiLoadSparseMatrix(&spA, 1, fi) == 0); fclose(fi); printf(" done\n"); ptiSparseMatrixStatus(&spA, stdout); // ptiAssert(ptiDumpSparseMatrix(&spA, 0, stdout) == 0); ptiAssert(ptiSparseMatrixToCSR(&csrA, &spA) == 0); ptiFreeSparseMatrix(&spA); ptiSparseMatrixStatusCSR(&csrA, stdout); // ptiAssert(ptiDumpSparseMatrixCSR(&csrA, stdout) == 0); ptiNewMatrix(&B, csrA.ncols, R); ptiRandomizeMatrix(&B); ptiNewMatrix(&C, csrA.nrows, R); ptiConstantMatrix(&C, 0); // ptiAssert(ptiDumpMatrix(&B, stdout) == 0); // ptiAssert(ptiDumpMatrix(&C, stdout) == 0); // Warm-up if(cuda_dev_id == -2) { printf("Run ptiSparseMatrixMulMatrixCSR:\n"); ptiSparseMatrixMulMatrixCSR(&C, &csrA, &B); } else if(cuda_dev_id == -1) { printf("Run ptiOmpSparseMatrixMulMatrixCSR:\n"); ptiOmpSparseMatrixMulMatrixCSR(&C, &csrA, &B); } ptiStartTimer(timer); for(int i=0; i<niters; ++i) { if(cuda_dev_id == -2) { ptiSparseMatrixMulMatrixCSR(&C, &csrA, &B); } else if(cuda_dev_id == -1) { ptiOmpSparseMatrixMulMatrixCSR(&C, &csrA, &B); } } ptiStopTimer(timer); printf("\n"); double elapsed_time = ptiPrintAverageElapsedTime(timer, niters, "CSR-SpMM"); ptiNnzIndex flops = 2 * csrA.nnz * R; ptiPrintGFLOPS(elapsed_time, flops, "CSR-SpMM"); if(fo != NULL) { ptiAssert(ptiDumpMatrix(&C, fo) == 0); fclose(fo); } ptiFreeSparseMatrixCSR(&csrA); ptiFreeMatrix(&B); ptiFreeMatrix(&C); ptiFreeTimer(timer); return 0; }
/* ==== file: app.c ==== */
#include <assert.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <omp.h> #include "../../support/matrix.h" #include "../../support/params.h" #include "../../support/timer.h" #include "../../support/utils.h" int main(int argc, char **argv) { // Process parameters struct Params p = input_params(argc, argv); // Initialize SpMV data structures PRINT_INFO(p.verbosity >= 1, "Reading matrix %s", p.fileName); struct COOMatrix cooMatrix = readCOOMatrix(p.fileName); PRINT_INFO(p.verbosity >= 1, " %u rows, %u columns, %u nonzeros", cooMatrix.numRows, cooMatrix.numCols, cooMatrix.numNonzeros); struct CSRMatrix csrMatrix = coo2csr(cooMatrix); float *inVector = malloc(csrMatrix.numCols * sizeof(float)); float *outVector = malloc(csrMatrix.numRows * sizeof(float)); initVector(inVector, csrMatrix.numCols); // Calculating result on CPU PRINT_INFO(p.verbosity >= 1, "Calculating result on CPU"); omp_set_num_threads(4); Timer timer; startTimer(&timer); #pragma omp parallel for for (uint32_t rowIdx = 0; rowIdx < csrMatrix.numRows; ++rowIdx) { float sum = 0.0f; for (uint32_t i = csrMatrix.rowPtrs[rowIdx]; i < csrMatrix.rowPtrs[rowIdx + 1]; ++i) { uint32_t colIdx = csrMatrix.nonzeros[i].col; float value = csrMatrix.nonzeros[i].value; sum += inVector[colIdx] * value; } outVector[rowIdx] = sum; } stopTimer(&timer); if (p.verbosity == 0) PRINT("%f", getElapsedTime(timer) * 1e3); PRINT_INFO(p.verbosity >= 1, " Elapsed time: %f ms", getElapsedTime(timer) * 1e3); // Deallocate data structures freeCOOMatrix(cooMatrix); freeCSRMatrix(csrMatrix); free(inVector); free(outVector); return 0; }
/* ==== file: 3d7pt_var.lbpar.c ==== */
#include <omp.h>
#include <math.h>
/* PLUTO helper macros: integer ceiling/floor division via double arithmetic */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil with variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"

#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * NOTE: y is modified in place while performing the carry. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/*
 * Driver: allocates the two time-step copies of the 3D grid and the seven
 * per-point coefficient arrays, fills them with pseudo-random data, then runs
 * the PLUTO-tiled 7-point variable-coefficient stencil TESTS times, timing
 * each sweep and reporting the minimum.
 *
 * Usage: prog Nx Ny Nz [Nt]
 * NOTE(review): Nx/Ny/Nz are uninitialized when argc <= 3, and Nt when
 * argc <= 4 — presumably the harness always passes all four; confirm.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3)
  {
    /* +2 adds one ghost layer on each side of the domain */
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  // A[0]/A[1] are the two time-step buffers (ping-pong via t % 2)
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // coef[0..6] are the seven per-point stencil coefficients
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 4;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  // NOTE(review): only indices 1..N-1 are initialized; the index-0 ghost
  // layer is read by the stencil but never written — presumably benign for
  // a timing benchmark, but confirm.
  // srand(42);
  for (i = 1; i < Nz; i++)
  {
    for (j = 1; j < Ny; j++)
    {
      for (k = 1; k < Nx; k++)
      {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++)
  {
    for (i=1; i<Nz; i++)
    {
      for (j=1; j<Ny; j++)
      {
        for (k=1; k<Nx; k++)
        {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++)
  {
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

    /* (glibc preinclude header comment elided — it was pulled in verbatim by
     * the source-to-source tool and carries no information about this code.) */

    /* The loop nest below is machine-generated (PLUTO/CLooG diamond tiling of
     * the time-space iteration domain); do not hand-edit the bound algebra.
     * t1..t4 enumerate tiles, t5 is the time step, t6/t7/t8 are z/y/x. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,12);t1++) {
        lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
        ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
        /* tiles along t2 are independent: parallelize across them */
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(24*t2-Nz,4)),3*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(12*t1+Ny+21,4)),floord(24*t2+Ny+20,4)),floord(24*t1-24*t2+Nz+Ny+19,4));t3++) {
            for (t4=max(max(max(0,ceild(3*t1-511,512)),ceild(24*t2-Nz-2044,2048)),ceild(4*t3-Ny-2044,2048));t4<=min(min(min(min(floord(4*t3+Nx,2048),floord(Nt+Nx-4,2048)),floord(12*t1+Nx+21,2048)),floord(24*t2+Nx+20,2048)),floord(24*t1-24*t2+Nz+Nx+19,2048));t4++) {
              for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),4*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),4*t3+2),2048*t4+2046),24*t1-24*t2+Nz+21);t5++) {
                for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
                    lbv=max(2048*t4,t5+1);
                    ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      /* 7-point update: center plus -/+1 neighbors in z, y, x,
                       * each weighted by its own coefficient array */
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] =
                        (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)])
                        + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)]))
                        + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]))
                        + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]))
                        + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]))
                        + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]))
                        + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
DRB014-outofbounds-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* The outmost loop is parallelized. But the inner level loop has out of
bound access for b[i][j] when j equals to 0. This will case memory access
of a previous row's last element.

For example, an array of 4x4:
j=0 1 2 3
i=0 x x x x
1 x x x x
2 x x x x
3 x x x x
outer loop: i=2, inner loop: j=0
array element accessed b[i][j-1] becomes b[2][-1], which in turn is b[1][3]
due to linearized row-major storage of the 2-D array.
This causes loop-carried data dependence between i=2 and i=1.

Data race pair: b[i][j]@75 vs. b[i][j-1]@75.
*/
#include <stdio.h>

/* DataRaceBench kernel: the out-of-bounds read b[i][j-1] at j==0 is the
 * INTENTIONAL defect this benchmark exists to exercise — it creates a
 * loop-carried dependence across the parallelized i loop, i.e. a data race.
 * Do NOT "fix" the indexing; race detectors are graded against it. */
int main(int argc, char* argv[])
{
  int i,j;
  int n=100, m=100;
  /* b is deliberately uninitialized; only the propagation pattern matters */
  double b[n][m];

  /* i is shared across iterations of no one; j must be private per thread */
  #pragma omp parallel for private(j)
  for (i=1;i<n;i++)
    for (j=0;j<m;j++)
      // Note there will be out of bound access
      b[i][j]=b[i][j-1];

  printf ("b[50][50]=%f\n",b[50][50]);
  return 0;
}
pi5.c
/*
 * Computes pi from the identity atan(1) = pi/4, where atan(z) is the
 * integral from 0 to z of 1/(1+x*x) dx.  The integral is approximated by
 * midpoint-rule summation over num_steps intervals, parallelized with an
 * OpenMP reduction.
 */
#include <stdio.h>
#include <omp.h>

static long num_steps = 100000; /* number of intervals */
double step;                    /* width of one interval (dx) */
#define NUM_THREADS 2

int main ()
{
    double pi;        /* final result */
    double sum = 0.0; /* accumulated sum of 4/(1+x*x) at interval midpoints */

    step = 1.0 / (double) num_steps;

    /*
     * Fix the team size explicitly; an environment variable would be a
     * more flexible alternative.
     */
    omp_set_num_threads(NUM_THREADS);

    /*
     * Each thread sums its share of the intervals into a private copy of
     * sum; the reduction(+) clause combines the partial sums at the end.
     * The midpoint x is declared inside the loop body, so it is private
     * to each iteration automatically.
     */
#pragma omp parallel for reduction(+:sum)
    for (int i = 1; i <= num_steps; i++) {
        const double x = (i - 0.5) * step; /* midpoint of interval i */
        sum += 4.0 / (1.0 + x * x);
    }

    /* Scale the accumulated sum by dx to finish the integral */
    pi = step * sum;

    printf( "The computed value of pi is %f\n", pi );
    return 0;
}
GB_binop__islt_int64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): every kernel body below is pulled in via #include of a shared
// template .c file, specialized through the GB_* macros defined here.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):       GB (_AaddB__islt_int64)
// A.*B function (eWiseMult):     GB (_AemultB_08__islt_int64)
// A.*B function (eWiseMult):     GB (_AemultB_02__islt_int64)
// A.*B function (eWiseMult):     GB (_AemultB_04__islt_int64)
// A.*B function (eWiseMult):     GB (_AemultB_bitmap__islt_int64)
// A*D function (colscale):       GB (_AxD__islt_int64)
// D*A function (rowscale):       GB (_DxB__islt_int64)
// C+=B function (dense accum):   GB (_Cdense_accumB__islt_int64)
// C+=b function (dense accum):   GB (_Cdense_accumb__islt_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_int64)
// C=scalar+B                     GB (_bind1st__islt_int64)
// C=scalar+B'                    GB (_bind1st_tran__islt_int64)
// C=A+scalar                     GB (_bind2nd__islt_int64)
// C=A'+scalar                    GB (_bind2nd_tran__islt_int64)

// C type:     int64_t
// A type:     int64_t
// A pattern?  0
// B type:     int64_t
// B pattern?  0

// BinaryOp:   cij = (aij < bij)

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x < y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLT || GxB_NO_INT64 || GxB_NO_ISLT_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISLT does not qualify, so this variant is compiled out.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__islt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__islt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__islt_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__islt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__islt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__islt_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion uses alpha/beta in place of entries absent from A or B
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__islt_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__islt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__islt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__islt_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__islt_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap (GBB is true for full matrices)
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__islt_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap (GBB is true for full matrices)
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    int64_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (x < aij) ;                   \
}

GrB_Info GB (_bind1st_tran__islt_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    int64_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (aij < y) ;                   \
}

GrB_Info GB (_bind2nd_tran__islt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Interp1PrimThirdOrderMUSCL.c
/*! @file Interp1PrimThirdOrderMUSCL.c @author Debojyoti Ghosh @brief 3rd order MUSCL scheme with Koren's limiter (component-wise application to vectors) */ #include <stdio.h> #include <stdlib.h> #include <basic.h> #include <arrayfunctions.h> #include <mathfunctions.h> #include <interpolation.h> #include <mpivars.h> #include <hypar.h> #ifdef with_omp #include <omp.h> #endif #undef _MINIMUM_GHOSTS_ /*! \def _MINIMUM_GHOSTS_ * Minimum number of ghost points required for this interpolation * method. */ #define _MINIMUM_GHOSTS_ 2 /*! @brief 3rd order MUSCL scheme with Koren's limiter (component-wise) on a uniform grid Computes the interpolated values of the first primitive of a function \f${\bf f}\left({\bf u}\right)\f$ at the interfaces from the cell-centered values of the function using the 3rd order MUSCL scheme with Koren's limiter on a uniform grid. The first primitive is defined as a function \f${\bf h}\left({\bf u}\right)\f$ that satisfies: \f{equation}{ {\bf f}\left({\bf u}\left(x\right)\right) = \frac{1}{\Delta x} \int_{x-\Delta x/2}^{x+\Delta x/2} {\bf h}\left({\bf u}\left(\zeta\right)\right)d\zeta, \f} where \f$x\f$ is the spatial coordinate along the dimension of the interpolation. This function computes numerical approximation \f$\hat{\bf f}_{j+1/2} \approx {\bf h}_{j+1/2}\f$ as: using the 3rd order MUSCL scheme with Koren's limiter as follows: \f{equation}{ \hat{\bf f}_{j+1/2} = {\bf f}_{j-1} + \phi \left[\frac{1}{3}\left({\bf f}_j-{\bf f}_{j-1}\right) + \frac{1}{6}\left({\bf f}_{j-1}-{\bf f}_{j-2}\right)\right] \f} where \f{equation}{ \phi = \frac {3\left({\bf f}_j-{\bf f}_{j-1}\right)\left({\bf f}_{j-1}-{\bf f}_{j-2}\right) + \epsilon} {2\left[\left({\bf f}_j-{\bf f}_{j-1}\right)-\left({\bf f}_{j-1}-{\bf f}_{j-2}\right)\right]^2 + 3\left({\bf f}_j-{\bf f}_{j-1}\right)\left({\bf f}_{j-1}-{\bf f}_{j-2}\right) + \epsilon}. \f} and \f$\epsilon\f$ is a small constant (typically \f$10^{-3}\f$). 
\b Implementation \b Notes: + The scalar interpolation method is applied to the vector function in a component-wise manner. + The method described above corresponds to a left-biased interpolation. The corresponding right-biased interpolation can be obtained by reflecting the equations about interface j+1/2. + The function computes the interpolant for the entire grid in one call. It loops over all the grid lines along the interpolation direction and carries out the 1D interpolation along these grid lines. + Location of cell-centers and cell interfaces along the spatial dimension of the interpolation is shown in the following figure: @image html chap1_1Ddomain.png @image latex chap1_1Ddomain.eps width=0.9\textwidth \b Function \b arguments: Argument | Type | Explanation --------- | --------- | --------------------------------------------- fI | double* | Array to hold the computed interpolant at the grid interfaces. This array must have the same layout as the solution, but with \b no \b ghost \b points. Its size should be the same as u in all dimensions, except dir (the dimension along which to interpolate) along which it should be larger by 1 (number of interfaces is 1 more than the number of interior cell centers). fC | double* | Array with the cell-centered values of the flux function \f${\bf f}\left({\bf u}\right)\f$. This array must have the same layout and size as the solution, \b with \b ghost \b points. u | double* | The solution array \f${\bf u}\f$ (with ghost points). If the interpolation is characteristic based, this is needed to compute the eigendecomposition. For a multidimensional problem, the layout is as follows: u is a contiguous 1D array of size (nvars*dim[0]*dim[1]*...*dim[D-1]) corresponding to the multi-dimensional solution, with the following ordering - nvars, dim[0], dim[1], ..., dim[D-1], where nvars is the number of solution components (#HyPar::nvars), dim is the local size (#HyPar::dim_local), D is the number of spatial dimensions. 
x | double* | The grid array (with ghost points). This is used only by non-uniform-grid interpolation methods. For multidimensional problems, the layout is as follows: x is a contiguous 1D array of size (dim[0]+dim[1]+...+dim[D-1]), with the spatial coordinates along dim[0] stored from 0,...,dim[0]-1, the spatial coordinates along dim[1] stored along dim[0],...,dim[0]+dim[1]-1, and so forth. upw | int | Upwinding direction: if positive, a left-biased interpolant will be computed; if negative, a right-biased interpolant will be computed. If the interpolation method is central, then this has no effect. dir | int | Spatial dimension along which to interpolate (eg: 0 for 1D; 0 or 1 for 2D; 0,1 or 2 for 3D) s | void* | Solver object of type #HyPar: the following variables are needed - #HyPar::ghosts, #HyPar::ndims, #HyPar::nvars, #HyPar::dim_local. m | void* | MPI object of type #MPIVariables: this is needed only by compact interpolation method that need to solve a global implicit system across MPI ranks. uflag | int | A flag indicating if the function being interpolated \f${\bf f}\f$ is the solution itself \f${\bf u}\f$ (if 1, \f${\bf f}\left({\bf u}\right) \equiv {\bf u}\f$). Reference: + van Leer, B., Towards the Ultimate Conservative Difference Scheme. 2: Monotonicity and Conservation Combined in a Second-Order Scheme, J. of Comput. 
Phys., 14 (4), 1974, pp.361-370, http://dx.doi.org/10.1016/0021-9991(74)90019-9
    + Koren, B., A Robust Upwind Discretization Method for Advection, Diffusion
      and Source Terms, Centrum voor Wiskunde en Informatica, Amsterdam, 1993
*/
/* NOTE(review): the u, x, m, and uflag arguments are unused by this
 * component-wise uniform-grid method; they exist to satisfy the common
 * interpolation-function signature. */
int Interp1PrimThirdOrderMUSCL(
                                double *fI,  /*!< Array of interpolated function values at the interfaces */
                                double *fC,  /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */
                                double *u,   /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */
                                double *x,   /*!< Grid coordinates */
                                int    upw,  /*!< Upwind direction (left or right biased) */
                                int    dir,  /*!< Spatial dimension along which to interpolation */
                                void   *s,   /*!< Object of type #HyPar containing solver-related variables */
                                void   *m,   /*!< Object of type #MPIVariables containing MPI-related variables */
                                int    uflag /*!< Flag to indicate if \f$f(u) \equiv u\f$, i.e, if the solution is being reconstructed */
                              )
{
  HyPar           *solver = (HyPar*)          s;
  MUSCLParameters *muscl  = (MUSCLParameters*) solver->interp;

  int ghosts = solver->ghosts;     /* number of ghost cells per boundary     */
  int ndims  = solver->ndims;      /* number of spatial dimensions           */
  int nvars  = solver->nvars;      /* number of solution components          */
  int *dim   = solver->dim_local;  /* local grid size (without ghosts)       */

  /* define some constants */
  double one_third = 1.0/3.0;
  double one_sixth = 1.0/6.0;

  /* create index and bounds for the outer loop, i.e., to loop over
   * all 1D lines along dimension "dir" */
  int indexC[ndims], indexI[ndims], index_outer[ndims],
      bounds_outer[ndims], bounds_inter[ndims];
  _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1;
  /* interface arrays have one extra point along dir */
  _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1;
  int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer);

  int i;
  if (upw > 0) {
    /* left-biased interpolant: stencil {j-2, j-1, j} for interface j-1/2 */
#pragma omp parallel for schedule(auto) default(shared) private(i,index_outer,indexC,indexI)
    for (i=0; i<N_outer; i++) {
      _ArrayIndexnD_(ndims,i,bounds_outer,index_outer,0);
      _ArrayCopy1D_(index_outer,indexC,ndims);
      _ArrayCopy1D_(index_outer,indexI,ndims);
      for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
        int p; /* 1D offset of this interface in fI (no ghosts) */
        _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
        int qm1,qm2,qp1,v;
        /* 1D offsets (with ghosts) of the three stencil cells in fC */
        indexC[dir] = indexI[dir]-2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2);
        indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
        indexC[dir] = indexI[dir]  ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1);
        for (v=0; v<nvars; v++) {
          /* Defining stencil points */
          double m2, m1, p1;
          m2 = fC[qm2*nvars+v];
          m1 = fC[qm1*nvars+v];
          p1 = fC[qp1*nvars+v];

          double fdiff = p1 - m1;  /* forward difference  */
          double bdiff = m1 - m2;  /* backward difference */
          /* Koren's limiter; muscl->eps avoids division by zero in
           * smooth regions where both differences vanish */
          double limit =  (3*fdiff*bdiff + muscl->eps)
                        / (2*(fdiff-bdiff)*(fdiff-bdiff) + 3*fdiff*bdiff + muscl->eps);

          fI[p*nvars+v] = m1 + limit * (one_third*fdiff + one_sixth*bdiff);
        }
      }
    }
  } else {
    /* right-biased interpolant: mirror of the above about interface j+1/2,
     * stencil {j-1, j, j+1} */
#pragma omp parallel for schedule(auto) default(shared) private(i,index_outer,indexC,indexI)
    for (i=0; i<N_outer; i++) {
      _ArrayIndexnD_(ndims,i,bounds_outer,index_outer,0);
      _ArrayCopy1D_(index_outer,indexC,ndims);
      _ArrayCopy1D_(index_outer,indexI,ndims);
      for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
        int p; /* 1D offset of this interface in fI (no ghosts) */
        _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
        int qm1,qp1,qp2,v;
        /* 1D offsets (with ghosts) of the three stencil cells in fC */
        indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
        indexC[dir] = indexI[dir]  ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1);
        indexC[dir] = indexI[dir]+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2);
        for (v=0; v<nvars; v++) {
          /* Defining stencil points */
          double m1, p1, p2;
          m1 = fC[qm1*nvars+v];
          p1 = fC[qp1*nvars+v];
          p2 = fC[qp2*nvars+v];

          double fdiff = p2 - p1;  /* forward difference  */
          double bdiff = p1 - m1;  /* backward difference */
          /* Koren's limiter (see left-biased branch) */
          double limit =  (3*fdiff*bdiff + muscl->eps)
                        / (2*(fdiff-bdiff)*(fdiff-bdiff) + 3*fdiff*bdiff + muscl->eps);

          fI[p*nvars+v] = p1 - limit * (one_third*fdiff + one_sixth*bdiff);
        }
      }
    }
  }

  return(0);
}
vector_batched.c
/******************************************************************************
 * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "seq_mv.h"

/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassAxpy8
 *
 * y += alpha[0]*x[0] + ... + alpha[k-1]*x[k-1], eight vectors per pass.
 * As the indexing below shows, the k vectors are assumed to be stored
 * contiguously starting at hypre_VectorData(x[0]): vector j begins at
 * offset j*size.  The k mod 8 leftover vectors are handled by dedicated
 * unrolled tail loops.  All offsets are expressed uniformly as
 * jstart + t*size + i (the original mixed this with recomputed
 * (j+t)*size forms, and the restk==5 tail skipped jstart entirely).
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassAxpy8( HYPRE_Complex *alpha,
                          hypre_Vector **x,
                          hypre_Vector  *y,
                          HYPRE_Int      k )
{
   HYPRE_Complex *x_data = hypre_VectorData(x[0]);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x[0]);
   HYPRE_Int      i, j, jstart, restk;

   /* number of vectors left over after the 8-way passes */
   restk = (k - (k / 8 * 8));

   if (k > 7)
   {
      for (j = 0; j < k - 7; j += 8)
      {
         jstart = j * size; /* start of vector j in the packed x storage */
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < size; i++)
         {
            y_data[i] += alpha[j]     * x_data[jstart            + i] +
                         alpha[j + 1] * x_data[jstart +     size + i] +
                         alpha[j + 2] * x_data[jstart + 2 * size + i] +
                         alpha[j + 3] * x_data[jstart + 3 * size + i] +
                         alpha[j + 4] * x_data[jstart + 4 * size + i] +
                         alpha[j + 5] * x_data[jstart + 5 * size + i] +
                         alpha[j + 6] * x_data[jstart + 6 * size + i] +
                         alpha[j + 7] * x_data[jstart + 7 * size + i];
         }
      }
   }

   /* unrolled tails for the k mod 8 remaining vectors */
   if (restk == 1)
   {
      jstart = (k - 1) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         y_data[i] += alpha[k - 1] * x_data[jstart + i];
      }
   }
   else if (restk == 2)
   {
      jstart = (k - 2) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         y_data[i] += alpha[k - 2] * x_data[jstart        + i] +
                      alpha[k - 1] * x_data[jstart + size + i];
      }
   }
   else if (restk == 3)
   {
      jstart = (k - 3) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         y_data[i] += alpha[k - 3] * x_data[jstart            + i] +
                      alpha[k - 2] * x_data[jstart +     size + i] +
                      alpha[k - 1] * x_data[jstart + 2 * size + i];
      }
   }
   else if (restk == 4)
   {
      jstart = (k - 4) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         y_data[i] += alpha[k - 4] * x_data[jstart            + i] +
                      alpha[k - 3] * x_data[jstart +     size + i] +
                      alpha[k - 2] * x_data[jstart + 2 * size + i] +
                      alpha[k - 1] * x_data[jstart + 3 * size + i];
      }
   }
   else if (restk == 5)
   {
      jstart = (k - 5) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         y_data[i] += alpha[k - 5] * x_data[jstart            + i] +
                      alpha[k - 4] * x_data[jstart +     size + i] +
                      alpha[k - 3] * x_data[jstart + 2 * size + i] +
                      alpha[k - 2] * x_data[jstart + 3 * size + i] +
                      alpha[k - 1] * x_data[jstart + 4 * size + i];
      }
   }
   else if (restk == 6)
   {
      jstart = (k - 6) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         y_data[i] += alpha[k - 6] * x_data[jstart            + i] +
                      alpha[k - 5] * x_data[jstart +     size + i] +
                      alpha[k - 4] * x_data[jstart + 2 * size + i] +
                      alpha[k - 3] * x_data[jstart + 3 * size + i] +
                      alpha[k - 2] * x_data[jstart + 4 * size + i] +
                      alpha[k - 1] * x_data[jstart + 5 * size + i];
      }
   }
   else if (restk == 7)
   {
      jstart = (k - 7) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         y_data[i] += alpha[k - 7] * x_data[jstart            + i] +
                      alpha[k - 6] * x_data[jstart +     size + i] +
                      alpha[k - 5] * x_data[jstart + 2 * size + i] +
                      alpha[k - 4] * x_data[jstart + 3 * size + i] +
                      alpha[k - 3] * x_data[jstart + 4 * size + i] +
                      alpha[k - 2] * x_data[jstart + 5 * size + i] +
                      alpha[k - 1] * x_data[jstart + 6 * size + i];
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassAxpy4
 *
 * Same contract as hypre_SeqVectorMassAxpy8, but four vectors per pass
 * with k mod 4 handled by unrolled tails.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassAxpy4( HYPRE_Complex *alpha,
                          hypre_Vector **x,
                          hypre_Vector  *y,
                          HYPRE_Int      k )
{
   HYPRE_Complex *x_data = hypre_VectorData(x[0]);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x[0]);
   HYPRE_Int      i, j, jstart, restk;

   restk = (k - (k / 4 * 4));

   if (k > 3)
   {
      for (j = 0; j < k - 3; j += 4)
      {
         jstart = j * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < size; i++)
         {
            y_data[i] += alpha[j]     * x_data[jstart            + i] +
                         alpha[j + 1] * x_data[jstart +     size + i] +
                         alpha[j + 2] * x_data[jstart + 2 * size + i] +
                         alpha[j + 3] * x_data[jstart + 3 * size + i];
         }
      }
   }

   /* unrolled tails for the k mod 4 remaining vectors */
   if (restk == 1)
   {
      jstart = (k - 1) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         y_data[i] += alpha[k - 1] * x_data[jstart + i];
      }
   }
   else if (restk == 2)
   {
      jstart = (k - 2) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         y_data[i] += alpha[k - 2] * x_data[jstart        + i] +
                      alpha[k - 1] * x_data[jstart + size + i];
      }
   }
   else if (restk == 3)
   {
      jstart = (k - 3) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         y_data[i] += alpha[k - 3] * x_data[jstart            + i] +
                      alpha[k - 2] * x_data[jstart +     size + i] +
                      alpha[k - 1] * x_data[jstart + 2 * size + i];
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassAxpy
 *
 * Dispatcher: y += sum_j alpha[j]*x[j].  "unroll" selects the 8-way or
 * 4-way unrolled kernel; any other value falls back to one pass per
 * vector.  The packed-storage assumption of the unrolled kernels also
 * holds here (vector j read at offset j*size from x[0]'s data).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassAxpy( HYPRE_Complex *alpha,
                         hypre_Vector **x,
                         hypre_Vector  *y,
                         HYPRE_Int      k,
                         HYPRE_Int      unroll )
{
   HYPRE_Complex *x_data = hypre_VectorData(x[0]);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x[0]);
   HYPRE_Int      i, j, jstart;

   if (unroll == 8)
   {
      hypre_SeqVectorMassAxpy8(alpha, x, y, k);
      return hypre_error_flag;
   }
   else if (unroll == 4)
   {
      hypre_SeqVectorMassAxpy4(alpha, x, y, k);
      return hypre_error_flag;
   }
   else
   {
      /* generic fallback: one axpy per vector */
      for (j = 0; j < k; j++)
      {
         jstart = j * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < size; i++)
         {
            y_data[i] += alpha[j] * x_data[jstart + i];
         }
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassInnerProd8
 *
 * result[j] = conj(y[j]) . x for j = 0..k-1, eight inner products per
 * pass over x (the y vectors are assumed packed back to back starting at
 * y[0]'s data).  Partial sums are named scalars so they can appear in the
 * OpenMP reduction clause.
 *
 * NOTE(review): partial sums accumulate in HYPRE_Real even though the
 * products are HYPRE_Complex — confirm this is intended for complex
 * builds.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassInnerProd8( hypre_Vector  *x,
                               hypre_Vector **y,
                               HYPRE_Int      k,
                               HYPRE_Real    *result )
{
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y[0]);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      i, j, restk;
   HYPRE_Real     res1, res2, res3, res4, res5, res6, res7, res8;
   HYPRE_Int      jstart, jstart1, jstart2, jstart3, jstart4, jstart5, jstart6, jstart7;

   restk = (k - (k / 8 * 8));

   if (k > 7)
   {
      for (j = 0; j < k - 7; j += 8)
      {
         res1 = 0; res2 = 0; res3 = 0; res4 = 0;
         res5 = 0; res6 = 0; res7 = 0; res8 = 0;
         jstart  = j * size;       /* start of vector j   */
         jstart1 = jstart  + size; /* vector j+1, etc.    */
         jstart2 = jstart1 + size;
         jstart3 = jstart2 + size;
         jstart4 = jstart3 + size;
         jstart5 = jstart4 + size;
         jstart6 = jstart5 + size;
         jstart7 = jstart6 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4,res5,res6,res7,res8) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < size; i++)
         {
            res1 += hypre_conj(y_data[jstart  + i]) * x_data[i];
            res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
            res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];
            res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];
            res5 += hypre_conj(y_data[jstart4 + i]) * x_data[i];
            res6 += hypre_conj(y_data[jstart5 + i]) * x_data[i];
            res7 += hypre_conj(y_data[jstart6 + i]) * x_data[i];
            res8 += hypre_conj(y_data[jstart7 + i]) * x_data[i];
         }
         result[j]     = res1;
         result[j + 1] = res2;
         result[j + 2] = res3;
         result[j + 3] = res4;
         result[j + 4] = res5;
         result[j + 5] = res6;
         result[j + 6] = res7;
         result[j + 7] = res8;
      }
   }

   /* unrolled tails for the k mod 8 remaining vectors */
   if (restk == 1)
   {
      res1 = 0;
      jstart = (k - 1) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res1 += hypre_conj(y_data[jstart + i]) * x_data[i];
      }
      result[k - 1] = res1;
   }
   else if (restk == 2)
   {
      res1 = 0; res2 = 0;
      jstart  = (k - 2) * size;
      jstart1 = jstart + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res1 += hypre_conj(y_data[jstart  + i]) * x_data[i];
         res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
      }
      result[k - 2] = res1;
      result[k - 1] = res2;
   }
   else if (restk == 3)
   {
      res1 = 0; res2 = 0; res3 = 0;
      jstart  = (k - 3) * size;
      jstart1 = jstart  + size;
      jstart2 = jstart1 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2,res3) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res1 += hypre_conj(y_data[jstart  + i]) * x_data[i];
         res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
         res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];
      }
      result[k - 3] = res1;
      result[k - 2] = res2;
      result[k - 1] = res3;
   }
   else if (restk == 4)
   {
      res1 = 0; res2 = 0; res3 = 0; res4 = 0;
      jstart  = (k - 4) * size;
      jstart1 = jstart  + size;
      jstart2 = jstart1 + size;
      jstart3 = jstart2 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res1 += hypre_conj(y_data[jstart  + i]) * x_data[i];
         res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
         res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];
         res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];
      }
      result[k - 4] = res1;
      result[k - 3] = res2;
      result[k - 2] = res3;
      result[k - 1] = res4;
   }
   else if (restk == 5)
   {
      res1 = 0; res2 = 0; res3 = 0; res4 = 0; res5 = 0;
      jstart  = (k - 5) * size;
      jstart1 = jstart  + size;
      jstart2 = jstart1 + size;
      jstart3 = jstart2 + size;
      jstart4 = jstart3 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4,res5) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res1 += hypre_conj(y_data[jstart  + i]) * x_data[i];
         res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
         res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];
         res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];
         res5 += hypre_conj(y_data[jstart4 + i]) * x_data[i];
      }
      result[k - 5] = res1;
      result[k - 4] = res2;
      result[k - 3] = res3;
      result[k - 2] = res4;
      result[k - 1] = res5;
   }
   else if (restk == 6)
   {
      res1 = 0; res2 = 0; res3 = 0; res4 = 0; res5 = 0; res6 = 0;
      jstart  = (k - 6) * size;
      jstart1 = jstart  + size;
      jstart2 = jstart1 + size;
      jstart3 = jstart2 + size;
      jstart4 = jstart3 + size;
      jstart5 = jstart4 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4,res5,res6) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res1 += hypre_conj(y_data[jstart  + i]) * x_data[i];
         res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
         res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];
         res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];
         res5 += hypre_conj(y_data[jstart4 + i]) * x_data[i];
         res6 += hypre_conj(y_data[jstart5 + i]) * x_data[i];
      }
      result[k - 6] = res1;
      result[k - 5] = res2;
      result[k - 4] = res3;
      result[k - 3] = res4;
      result[k - 2] = res5;
      result[k - 1] = res6;
   }
   else if (restk == 7)
   {
      res1 = 0; res2 = 0; res3 = 0; res4 = 0; res5 = 0; res6 = 0; res7 = 0;
      jstart  = (k - 7) * size;
      jstart1 = jstart  + size;
      jstart2 = jstart1 + size;
      jstart3 = jstart2 + size;
      jstart4 = jstart3 + size;
      jstart5 = jstart4 + size;
      jstart6 = jstart5 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4,res5,res6,res7) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res1 += hypre_conj(y_data[jstart  + i]) * x_data[i];
         res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
         res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];
         res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];
         res5 += hypre_conj(y_data[jstart4 + i]) * x_data[i];
         res6 += hypre_conj(y_data[jstart5 + i]) * x_data[i];
         res7 += hypre_conj(y_data[jstart6 + i]) * x_data[i];
      }
      result[k - 7] = res1;
      result[k - 6] = res2;
      result[k - 5] = res3;
      result[k - 4] = res4;
      result[k - 3] = res5;
      result[k - 2] = res6;
      result[k - 1] = res7;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassInnerProd4
 *
 * Same contract as hypre_SeqVectorMassInnerProd8, four products per pass.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassInnerProd4( hypre_Vector  *x,
                               hypre_Vector **y,
                               HYPRE_Int      k,
                               HYPRE_Real    *result )
{
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y[0]);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      i, j, restk;
   HYPRE_Real     res1, res2, res3, res4;
   HYPRE_Int      jstart, jstart1, jstart2, jstart3;

   restk = (k - (k / 4 * 4));

   if (k > 3)
   {
      for (j = 0; j < k - 3; j += 4)
      {
         res1 = 0; res2 = 0; res3 = 0; res4 = 0;
         jstart  = j * size;
         jstart1 = jstart  + size;
         jstart2 = jstart1 + size;
         jstart3 = jstart2 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < size; i++)
         {
            res1 += hypre_conj(y_data[jstart  + i]) * x_data[i];
            res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
            res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];
            res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];
         }
         result[j]     = res1;
         result[j + 1] = res2;
         result[j + 2] = res3;
         result[j + 3] = res4;
      }
   }

   /* unrolled tails for the k mod 4 remaining vectors */
   if (restk == 1)
   {
      res1 = 0;
      jstart = (k - 1) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res1 += hypre_conj(y_data[jstart + i]) * x_data[i];
      }
      result[k - 1] = res1;
   }
   else if (restk == 2)
   {
      res1 = 0; res2 = 0;
      jstart  = (k - 2) * size;
      jstart1 = jstart + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res1 += hypre_conj(y_data[jstart  + i]) * x_data[i];
         res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
      }
      result[k - 2] = res1;
      result[k - 1] = res2;
   }
   else if (restk == 3)
   {
      res1 = 0; res2 = 0; res3 = 0;
      jstart  = (k - 3) * size;
      jstart1 = jstart  + size;
      jstart2 = jstart1 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2,res3) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res1 += hypre_conj(y_data[jstart  + i]) * x_data[i];
         res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
         res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];
      }
      result[k - 3] = res1;
      result[k - 2] = res2;
      result[k - 1] = res3;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassDotpTwo8
 *
 * result_x[j] = conj(z[j]) . x  and  result_y[j] = conj(z[j]) . y for
 * j = 0..k-1, eight z vectors per pass (z packed back to back starting
 * at z[0]'s data).  Each z element is loaded once and used for both
 * dot products.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassDotpTwo8( hypre_Vector  *x,
                             hypre_Vector  *y,
                             hypre_Vector **z,
                             HYPRE_Int      k,
                             HYPRE_Real    *result_x,
                             HYPRE_Real    *result_y )
{
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Complex *z_data = hypre_VectorData(z[0]);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      i, j, restk;
   HYPRE_Real     res_x1, res_x2, res_x3, res_x4, res_x5, res_x6, res_x7, res_x8;
   HYPRE_Real     res_y1, res_y2, res_y3, res_y4, res_y5, res_y6, res_y7, res_y8;
   HYPRE_Int      jstart, jstart1, jstart2, jstart3, jstart4, jstart5, jstart6, jstart7;

   restk = (k - (k / 8 * 8));

   if (k > 7)
   {
      for (j = 0; j < k - 7; j += 8)
      {
         res_x1 = 0; res_x2 = 0; res_x3 = 0; res_x4 = 0;
         res_x5 = 0; res_x6 = 0; res_x7 = 0; res_x8 = 0;
         res_y1 = 0; res_y2 = 0; res_y3 = 0; res_y4 = 0;
         res_y5 = 0; res_y6 = 0; res_y7 = 0; res_y8 = 0;
         jstart  = j * size;
         jstart1 = jstart  + size;
         jstart2 = jstart1 + size;
         jstart3 = jstart2 + size;
         jstart4 = jstart3 + size;
         jstart5 = jstart4 + size;
         jstart6 = jstart5 + size;
         jstart7 = jstart6 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_x5,res_x6,res_x7,res_x8,res_y1,res_y2,res_y3,res_y4,res_y5,res_y6,res_y7,res_y8) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < size; i++)
         {
            res_x1 += hypre_conj(z_data[jstart  + i]) * x_data[i];
            res_y1 += hypre_conj(z_data[jstart  + i]) * y_data[i];
            res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
            res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
            res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];
            res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];
            res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];
            res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];
            res_x5 += hypre_conj(z_data[jstart4 + i]) * x_data[i];
            res_y5 += hypre_conj(z_data[jstart4 + i]) * y_data[i];
            res_x6 += hypre_conj(z_data[jstart5 + i]) * x_data[i];
            res_y6 += hypre_conj(z_data[jstart5 + i]) * y_data[i];
            res_x7 += hypre_conj(z_data[jstart6 + i]) * x_data[i];
            res_y7 += hypre_conj(z_data[jstart6 + i]) * y_data[i];
            res_x8 += hypre_conj(z_data[jstart7 + i]) * x_data[i];
            res_y8 += hypre_conj(z_data[jstart7 + i]) * y_data[i];
         }
         result_x[j]     = res_x1;
         result_x[j + 1] = res_x2;
         result_x[j + 2] = res_x3;
         result_x[j + 3] = res_x4;
         result_x[j + 4] = res_x5;
         result_x[j + 5] = res_x6;
         result_x[j + 6] = res_x7;
         result_x[j + 7] = res_x8;
         result_y[j]     = res_y1;
         result_y[j + 1] = res_y2;
         result_y[j + 2] = res_y3;
         result_y[j + 3] = res_y4;
         result_y[j + 4] = res_y5;
         result_y[j + 5] = res_y6;
         result_y[j + 6] = res_y7;
         result_y[j + 7] = res_y8;
      }
   }

   /* unrolled tails for the k mod 8 remaining vectors */
   if (restk == 1)
   {
      res_x1 = 0; res_y1 = 0;
      jstart = (k - 1) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_y1) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];
         res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];
      }
      result_x[k - 1] = res_x1;
      result_y[k - 1] = res_y1;
   }
   else if (restk == 2)
   {
      res_x1 = 0; res_x2 = 0;
      res_y1 = 0; res_y2 = 0;
      jstart  = (k - 2) * size;
      jstart1 = jstart + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_y1,res_y2) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res_x1 += hypre_conj(z_data[jstart  + i]) * x_data[i];
         res_y1 += hypre_conj(z_data[jstart  + i]) * y_data[i];
         res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
         res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
      }
      result_x[k - 2] = res_x1;
      result_x[k - 1] = res_x2;
      result_y[k - 2] = res_y1;
      result_y[k - 1] = res_y2;
   }
   else if (restk == 3)
   {
      res_x1 = 0; res_x2 = 0; res_x3 = 0;
      res_y1 = 0; res_y2 = 0; res_y3 = 0;
      jstart  = (k - 3) * size;
      jstart1 = jstart  + size;
      jstart2 = jstart1 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_y1,res_y2,res_y3) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res_x1 += hypre_conj(z_data[jstart  + i]) * x_data[i];
         res_y1 += hypre_conj(z_data[jstart  + i]) * y_data[i];
         res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
         res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
         res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];
         res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];
      }
      result_x[k - 3] = res_x1;
      result_x[k - 2] = res_x2;
      result_x[k - 1] = res_x3;
      result_y[k - 3] = res_y1;
      result_y[k - 2] = res_y2;
      result_y[k - 1] = res_y3;
   }
   else if (restk == 4)
   {
      res_x1 = 0; res_x2 = 0; res_x3 = 0; res_x4 = 0;
      res_y1 = 0; res_y2 = 0; res_y3 = 0; res_y4 = 0;
      jstart  = (k - 4) * size;
      jstart1 = jstart  + size;
      jstart2 = jstart1 + size;
      jstart3 = jstart2 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_y1,res_y2,res_y3,res_y4) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res_x1 += hypre_conj(z_data[jstart  + i]) * x_data[i];
         res_y1 += hypre_conj(z_data[jstart  + i]) * y_data[i];
         res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
         res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
         res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];
         res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];
         res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];
         res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];
      }
      result_x[k - 4] = res_x1;
      result_x[k - 3] = res_x2;
      result_x[k - 2] = res_x3;
      result_x[k - 1] = res_x4;
      result_y[k - 4] = res_y1;
      result_y[k - 3] = res_y2;
      result_y[k - 2] = res_y3;
      result_y[k - 1] = res_y4;
   }
   else if (restk == 5)
   {
      res_x1 = 0; res_x2 = 0; res_x3 = 0; res_x4 = 0; res_x5 = 0;
      res_y1 = 0; res_y2 = 0; res_y3 = 0; res_y4 = 0; res_y5 = 0;
      jstart  = (k - 5) * size;
      jstart1 = jstart  + size;
      jstart2 = jstart1 + size;
      jstart3 = jstart2 + size;
      jstart4 = jstart3 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_x5,res_y1,res_y2,res_y3,res_y4,res_y5) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res_x1 += hypre_conj(z_data[jstart  + i]) * x_data[i];
         res_y1 += hypre_conj(z_data[jstart  + i]) * y_data[i];
         res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
         res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
         res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];
         res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];
         res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];
         res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];
         res_x5 += hypre_conj(z_data[jstart4 + i]) * x_data[i];
         res_y5 += hypre_conj(z_data[jstart4 + i]) * y_data[i];
      }
      result_x[k - 5] = res_x1;
      result_x[k - 4] = res_x2;
      result_x[k - 3] = res_x3;
      result_x[k - 2] = res_x4;
      result_x[k - 1] = res_x5;
      result_y[k - 5] = res_y1;
      result_y[k - 4] = res_y2;
      result_y[k - 3] = res_y3;
      result_y[k - 2] = res_y4;
      result_y[k - 1] = res_y5;
   }
   else if (restk == 6)
   {
      res_x1 = 0; res_x2 = 0; res_x3 = 0; res_x4 = 0; res_x5 = 0; res_x6 = 0;
      res_y1 = 0; res_y2 = 0; res_y3 = 0; res_y4 = 0; res_y5 = 0; res_y6 = 0;
      jstart  = (k - 6) * size;
      jstart1 = jstart  + size;
      jstart2 = jstart1 + size;
      jstart3 = jstart2 + size;
      jstart4 = jstart3 + size;
      jstart5 = jstart4 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_x5,res_x6,res_y1,res_y2,res_y3,res_y4,res_y5,res_y6) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res_x1 += hypre_conj(z_data[jstart  + i]) * x_data[i];
         res_y1 += hypre_conj(z_data[jstart  + i]) * y_data[i];
         res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
         res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
         res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];
         res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];
         res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];
         res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];
         res_x5 += hypre_conj(z_data[jstart4 + i]) * x_data[i];
         res_y5 += hypre_conj(z_data[jstart4 + i]) * y_data[i];
         res_x6 += hypre_conj(z_data[jstart5 + i]) * x_data[i];
         res_y6 += hypre_conj(z_data[jstart5 + i]) * y_data[i];
      }
      result_x[k - 6] = res_x1;
      result_x[k - 5] = res_x2;
      result_x[k - 4] = res_x3;
      result_x[k - 3] = res_x4;
      result_x[k - 2] = res_x5;
      result_x[k - 1] = res_x6;
      result_y[k - 6] = res_y1;
      result_y[k - 5] = res_y2;
      result_y[k - 4] = res_y3;
      result_y[k - 3] = res_y4;
      result_y[k - 2] = res_y5;
      result_y[k - 1] = res_y6;
   }
   else if (restk == 7)
   {
      res_x1 = 0; res_x2 = 0; res_x3 = 0; res_x4 = 0; res_x5 = 0; res_x6 = 0; res_x7 = 0;
      res_y1 = 0; res_y2 = 0; res_y3 = 0; res_y4 = 0; res_y5 = 0; res_y6 = 0; res_y7 = 0;
      jstart  = (k - 7) * size;
      jstart1 = jstart  + size;
      jstart2 = jstart1 + size;
      jstart3 = jstart2 + size;
      jstart4 = jstart3 + size;
      jstart5 = jstart4 + size;
      jstart6 = jstart5 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_x5,res_x6,res_x7,res_y1,res_y2,res_y3,res_y4,res_y5,res_y6,res_y7) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res_x1 += hypre_conj(z_data[jstart  + i]) * x_data[i];
         res_y1 += hypre_conj(z_data[jstart  + i]) * y_data[i];
         res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
         res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
         res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];
         res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];
         res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];
         res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];
         res_x5 += hypre_conj(z_data[jstart4 + i]) * x_data[i];
         res_y5 += hypre_conj(z_data[jstart4 + i]) * y_data[i];
         res_x6 += hypre_conj(z_data[jstart5 + i]) * x_data[i];
         res_y6 += hypre_conj(z_data[jstart5 + i]) * y_data[i];
         res_x7 += hypre_conj(z_data[jstart6 + i]) * x_data[i];
         res_y7 += hypre_conj(z_data[jstart6 + i]) * y_data[i];
      }
      result_x[k - 7] = res_x1;
      result_x[k - 6] = res_x2;
      result_x[k - 5] = res_x3;
      result_x[k - 4] = res_x4;
      result_x[k - 3] = res_x5;
      result_x[k - 2] = res_x6;
      result_x[k - 1] = res_x7;
      result_y[k - 7] = res_y1;
      result_y[k - 6] = res_y2;
      result_y[k - 5] = res_y3;
      result_y[k - 4] = res_y4;
      result_y[k - 3] = res_y5;
      result_y[k - 2] = res_y6;
      result_y[k - 1] = res_y7;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassDotpTwo4
 *
 * Same contract as hypre_SeqVectorMassDotpTwo8, four z vectors per pass.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassDotpTwo4( hypre_Vector  *x,
                             hypre_Vector  *y,
                             hypre_Vector **z,
                             HYPRE_Int      k,
                             HYPRE_Real    *result_x,
                             HYPRE_Real    *result_y )
{
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Complex *z_data = hypre_VectorData(z[0]);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      i, j, restk;
   HYPRE_Real     res_x1, res_x2, res_x3, res_x4;
   HYPRE_Real     res_y1, res_y2, res_y3, res_y4;
   HYPRE_Int      jstart, jstart1, jstart2, jstart3;

   restk = (k - (k / 4 * 4));

   if (k > 3)
   {
      for (j = 0; j < k - 3; j += 4)
      {
         res_x1 = 0; res_x2 = 0; res_x3 = 0; res_x4 = 0;
         res_y1 = 0; res_y2 = 0; res_y3 = 0; res_y4 = 0;
         jstart  = j * size;
         jstart1 = jstart  + size;
         jstart2 = jstart1 + size;
         jstart3 = jstart2 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_y1,res_y2,res_y3,res_y4) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < size; i++)
         {
            res_x1 += hypre_conj(z_data[jstart  + i]) * x_data[i];
            res_y1 += hypre_conj(z_data[jstart  + i]) * y_data[i];
            res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
            res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
            res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];
            res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];
            res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];
            res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];
         }
         result_x[j]     = res_x1;
         result_x[j + 1] = res_x2;
         result_x[j + 2] = res_x3;
         result_x[j + 3] = res_x4;
         result_y[j]     = res_y1;
         result_y[j + 1] = res_y2;
         result_y[j + 2] = res_y3;
         result_y[j + 3] = res_y4;
      }
   }

   /* unrolled tails for the k mod 4 remaining vectors */
   if (restk == 1)
   {
      res_x1 = 0; res_y1 = 0;
      jstart = (k - 1) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_y1) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];
         res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];
      }
      result_x[k - 1] = res_x1;
      result_y[k - 1] = res_y1;
   }
   else if (restk == 2)
   {
      res_x1 = 0; res_x2 = 0;
      res_y1 = 0; res_y2 = 0;
      jstart  = (k - 2) * size;
      jstart1 = jstart + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_y1,res_y2) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res_x1 += hypre_conj(z_data[jstart  + i]) * x_data[i];
         res_y1 += hypre_conj(z_data[jstart  + i]) * y_data[i];
         res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
         res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
      }
      result_x[k - 2] = res_x1;
      result_x[k - 1] = res_x2;
      result_y[k - 2] = res_y1;
      result_y[k - 1] = res_y2;
   }
   else if (restk == 3)
   {
      res_x1 = 0; res_x2 = 0; res_x3 = 0;
      res_y1 = 0; res_y2 = 0; res_y3 = 0;
      jstart  = (k - 3) * size;
      jstart1 = jstart  + size;
      jstart2 = jstart1 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_y1,res_y2,res_y3) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res_x1 += hypre_conj(z_data[jstart  + i]) * x_data[i];
         res_y1 += hypre_conj(z_data[jstart  + i]) * y_data[i];
         res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
         res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
         res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];
         res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];
      }
      result_x[k - 3] = res_x1;
      result_x[k - 2] = res_x2;
      result_x[k - 1] = res_x3;
      result_y[k - 3] = res_y1;
      result_y[k - 2] = res_y2;
      result_y[k - 1] = res_y3;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassInnerProd
 *
 * Dispatcher for the mass inner product: result[j] = conj(y[j]) . x.
 * "unroll" selects the 8-way or 4-way kernel; any other value uses one
 * reduction pass per vector.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassInnerProd( hypre_Vector  *x,
                              hypre_Vector **y,
                              HYPRE_Int      k,
                              HYPRE_Int      unroll,
                              HYPRE_Real    *result )
{
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y[0]);
   HYPRE_Real     res;
   HYPRE_Int      size = hypre_VectorSize(x);
   HYPRE_Int      i, j, jstart;

   if (unroll == 8)
   {
      hypre_SeqVectorMassInnerProd8(x, y, k, result);
      return hypre_error_flag;
   }
   else if (unroll == 4)
   {
      hypre_SeqVectorMassInnerProd4(x, y, k, result);
      return hypre_error_flag;
   }
   else
   {
      /* generic fallback: one inner product per vector */
      for (j = 0; j < k; j++)
      {
         res = 0;
         jstart = j * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < size; i++)
         {
            res += hypre_conj(y_data[jstart + i]) * x_data[i];
         }
         result[j] = res;
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassDotpTwo
 *
 * Dispatcher for the paired mass dot products:
 *   result_x[j] = conj(z[j]) . x,  result_y[j] = conj(z[j]) . y.
 * "unroll" selects the 8-way or 4-way kernel; any other value uses one
 * reduction pass per z vector.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassDotpTwo( hypre_Vector  *x,
                            hypre_Vector  *y,
                            hypre_Vector **z,
                            HYPRE_Int      k,
                            HYPRE_Int      unroll,
                            HYPRE_Real    *result_x,
                            HYPRE_Real    *result_y )
{
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Complex *z_data = hypre_VectorData(z[0]);
   HYPRE_Real     res_x, res_y;
   HYPRE_Int      size = hypre_VectorSize(x);
   HYPRE_Int      i, j, jstart;

   if (unroll == 8)
   {
      hypre_SeqVectorMassDotpTwo8(x, y, z, k, result_x, result_y);
      return hypre_error_flag;
   }
   else if (unroll == 4)
   {
      hypre_SeqVectorMassDotpTwo4(x, y, z, k, result_x, result_y);
      return hypre_error_flag;
   }
   else
   {
      /* generic fallback: one paired-reduction pass per z vector */
      for (j = 0; j < k; j++)
      {
         res_x = 0; //result_x[j];
         res_y = 0; //result_y[j];
         jstart = j * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x,res_y) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < size; i++)
         {
            res_x += hypre_conj(z_data[jstart + i]) * x_data[i];
            res_y += hypre_conj(z_data[jstart + i]) * y_data[i];
         }
         result_x[j] = res_x;
         result_y[j] = res_y;
      }
   }

   return hypre_error_flag;
}
paint.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/resource_.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. The fuzz member of % image defines how much tolerance is acceptable to consider two colors as % the same. For example, set fuzz to 10 and the color red at intensities of % 100 and 102 respectively are now interpreted as the same color for the % purposes of the floodfill. % % The format of the FloodfillPaintImage method is: % % MagickBooleanType FloodfillPaintImage(Image *image, % const DrawInfo *draw_info,const PixelInfo target, % const ssize_t x_offset,const ssize_t y_offset, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o draw_info: the draw info. % % o target: the RGB value of the target color. % % o x_offset,y_offset: the starting location of the operation. % % o invert: paint any pixel that does not match the target color. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType FloodfillPaintImage(Image *image, const DrawInfo *draw_info,const PixelInfo *target,const ssize_t x_offset, const ssize_t y_offset,const MagickBooleanType invert, ExceptionInfo *exception) { #define MaxStacksize 131072UL #define PushSegmentStack(up,left,right,delta) \ { \ if (s >= (segment_stack+MaxStacksize)) \ ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \ else \ { \ if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \ { \ s->x1=(double) (left); \ s->y1=(double) (up); \ s->x2=(double) (right); \ s->y2=(double) (delta); \ s++; \ } \ } \ } CacheView *floodplane_view, *image_view; Image *floodplane_image; MagickBooleanType skip, status; MemoryInfo *segment_info; PixelInfo fill_color, pixel; register SegmentInfo *s; SegmentInfo *segment_stack; ssize_t offset, start, x, x1, x2, y; /* Check boundary conditions. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickSignature); if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns)) return(MagickFalse); if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows)) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); if ((image->alpha_trait != BlendPixelTrait) && (draw_info->fill.alpha_trait == BlendPixelTrait)) (void) SetImageAlpha(image,OpaqueAlpha,exception); /* Set floodfill state. 
*/ floodplane_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (floodplane_image == (Image *) NULL) return(MagickFalse); floodplane_image->alpha_trait=UndefinedPixelTrait; floodplane_image->colorspace=GRAYColorspace; (void) QueryColorCompliance("#000",AllCompliance, &floodplane_image->background_color,exception); (void) SetImageBackgroundColor(floodplane_image,exception); segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack)); if (segment_info == (MemoryInfo *) NULL) { floodplane_image=DestroyImage(floodplane_image); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info); /* Push initial segment on stack. */ status=MagickTrue; x=x_offset; y=y_offset; start=0; s=segment_stack; PushSegmentStack(y,x,x,1); PushSegmentStack(y+1,x,x,-1); GetPixelInfo(image,&pixel); image_view=AcquireVirtualCacheView(image,exception); floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception); while (s > segment_stack) { register const Quantum *restrict p; register Quantum *restrict q; register ssize_t x; /* Pop segment off stack. */ s--; x1=(ssize_t) s->x1; x2=(ssize_t) s->x2; offset=(ssize_t) s->y2; y=(ssize_t) s->y1+offset; /* Recolor neighboring pixels. */ p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception); q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; p+=x1*GetPixelChannels(image); q+=x1*GetPixelChannels(floodplane_image); for (x=x1; x >= 0; x--) { if (GetPixelGray(floodplane_image,q) != 0) break; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert) break; SetPixelGray(floodplane_image,QuantumRange,q); p-=GetPixelChannels(image); q-=GetPixelChannels(floodplane_image); } if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse) break; skip=x >= x1 ? 
MagickTrue : MagickFalse; if (skip == MagickFalse) { start=x+1; if (start < x1) PushSegmentStack(y,start,x1-1,-offset); x=x1+1; } do { if (skip == MagickFalse) { if (x < (ssize_t) image->columns) { p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1, exception); q=GetCacheViewAuthenticPixels(floodplane_view,x,y,image->columns- x,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for ( ; x < (ssize_t) image->columns; x++) { if (GetPixelGray(floodplane_image,q) != 0) break; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert) break; SetPixelGray(floodplane_image,QuantumRange,q); p+=GetPixelChannels(image); q+=GetPixelChannels(floodplane_image); } status=SyncCacheViewAuthenticPixels(floodplane_view,exception); if (status == MagickFalse) break; } PushSegmentStack(y,start,x-1,offset); if (x > (x2+1)) PushSegmentStack(y,x2+1,x-1,-offset); } skip=MagickFalse; x++; if (x <= x2) { p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1, exception); q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for ( ; x <= x2; x++) { if (GetPixelGray(floodplane_image,q) != 0) break; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert) break; p+=GetPixelChannels(image); q+=GetPixelChannels(floodplane_image); } } start=x; } while (x <= x2); } for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *restrict p; register Quantum *restrict q; register ssize_t x; /* Tile fill color onto floodplane. 
*/ p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelGray(floodplane_image,p) != 0) { (void) GetFillColor(draw_info,x,y,&fill_color,exception); SetPixelInfoPixel(image,&fill_color,q); } p+=GetPixelChannels(floodplane_image); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) break; } floodplane_view=DestroyCacheView(floodplane_view); image_view=DestroyCacheView(image_view); segment_info=RelinquishVirtualMemory(segment_info); floodplane_image=DestroyImage(floodplane_image); return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G r a d i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GradientImage() applies a continuously smooth color transitions along a % vector from one color to another. % % Note, the interface of this method will change in the future to support % more than one transistion. % % The format of the GradientImage method is: % % MagickBooleanType GradientImage(Image *image,const GradientType type, % const SpreadMethod method,const PixelInfo *start_color, % const PixelInfo *stop_color,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the gradient type: linear or radial. % % o spread: the gradient spread meathod: pad, reflect, or repeat. % % o start_color: the start color. % % o stop_color: the stop color. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickMax(const double x,const double y) { return(x > y ? 
x : y); } MagickExport MagickBooleanType GradientImage(Image *image, const GradientType type,const SpreadMethod method, const PixelInfo *start_color,const PixelInfo *stop_color, ExceptionInfo *exception) { DrawInfo *draw_info; GradientInfo *gradient; MagickBooleanType status; register ssize_t i; /* Set gradient start-stop end points. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(start_color != (const PixelInfo *) NULL); assert(stop_color != (const PixelInfo *) NULL); draw_info=AcquireDrawInfo(); gradient=(&draw_info->gradient); gradient->type=type; gradient->bounding_box.width=image->columns; gradient->bounding_box.height=image->rows; gradient->gradient_vector.x2=(double) image->columns-1.0; gradient->gradient_vector.y2=(double) image->rows-1.0; if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0)) gradient->gradient_vector.x2=0.0; gradient->center.x=(double) gradient->gradient_vector.x2/2.0; gradient->center.y=(double) gradient->gradient_vector.y2/2.0; gradient->radius=MagickMax(gradient->center.x,gradient->center.y); gradient->spread=method; /* Define the gradient to fill between the stops. */ gradient->number_stops=2; gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops, sizeof(*gradient->stops)); if (gradient->stops == (StopInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) ResetMagickMemory(gradient->stops,0,gradient->number_stops* sizeof(*gradient->stops)); for (i=0; i < (ssize_t) gradient->number_stops; i++) GetPixelInfo(image,&gradient->stops[i].color); gradient->stops[0].color=(*start_color); gradient->stops[0].offset=0.0; gradient->stops[1].color=(*stop_color); gradient->stops[1].offset=1.0; /* Draw a gradient on the image. 
*/ (void) SetImageColorspace(image,start_color->colorspace,exception); status=DrawGradientImage(image,draw_info,exception); draw_info=DestroyDrawInfo(draw_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O i l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OilPaintImage() applies a special effect filter that simulates an oil % painting. Each pixel is replaced by the most frequent color occurring % in a circular region defined by radius. % % The format of the OilPaintImage method is: % % Image *OilPaintImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the circular neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ static size_t **DestroyHistogramThreadSet(size_t **histogram) { register ssize_t i; assert(histogram != (size_t **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (histogram[i] != (size_t *) NULL) histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]); histogram=(size_t **) RelinquishMagickMemory(histogram); return(histogram); } static size_t **AcquireHistogramThreadSet(const size_t count) { register ssize_t i; size_t **histogram, number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); histogram=(size_t **) AcquireQuantumMemory(number_threads,sizeof(*histogram)); if (histogram == (size_t **) NULL) return((size_t **) NULL); (void) ResetMagickMemory(histogram,0,number_threads*sizeof(*histogram)); for (i=0; i < (ssize_t) number_threads; i++) { histogram[i]=(size_t *) AcquireQuantumMemory(count,sizeof(**histogram)); if (histogram[i] == (size_t *) NULL) return(DestroyHistogramThreadSet(histogram)); } return(histogram); } 
/*
  OilPaintImage(): replace each pixel with the most frequent intensity in a
  circular neighborhood of the given radius/sigma.  This chunk is the
  prologue: argument checks, working-image clones, and per-thread histogram
  allocation.  Returns NULL on allocation failure (exception already set).
*/
MagickExport Image *OilPaintImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define NumberPaintBins  256
#define OilPaintImageTag  "OilPaint/Image"

  CacheView
    *image_view,
    *paint_view;

  Image
    *linear_image,
    *paint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    **histograms,
    width;

  ssize_t
    center,
    y;

  /*
    Initialize painted image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  paint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (paint_image != (Image *) NULL)
        paint_image=DestroyImage(paint_image);  /* fix: was assigned to
          linear_image, leaving paint_image dangling and linear_image clobbered */
      return((Image *) NULL);
    }
  if (SetImageStorageClass(paint_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      return((Image *) NULL);
    }
  histograms=AcquireHistogramThreadSet(NumberPaintBins);
  if (histograms == (size_t **) NULL)
    {
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Oil paint image.
*/ status=MagickTrue; progress=0; center=(ssize_t) GetPixelChannels(linear_image)*(linear_image->columns+width)* (width/2L)+GetPixelChannels(linear_image)*(width/2L); image_view=AcquireVirtualCacheView(linear_image,exception); paint_view=AcquireAuthenticCacheView(paint_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(linear_image,paint_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { register const Quantum *restrict p; register Quantum *restrict q; register size_t *histogram; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (width/2L),linear_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } histogram=histograms[GetOpenMPThreadId()]; for (x=0; x < (ssize_t) linear_image->columns; x++) { register ssize_t i, u; size_t count; ssize_t j, k, n, v; /* Assign most frequent color. 
*/ k=0; j=0; count=0; (void) ResetMagickMemory(histogram,0,NumberPaintBins* sizeof(*histogram)); for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { n=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity( linear_image,p+GetPixelChannels(linear_image)*(u+k)))); histogram[n]++; if (histogram[n] > count) { j=k+u; count=histogram[n]; } } k+=(ssize_t) (linear_image->columns+width); } for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++) { PixelChannel channel=GetPixelChannelChannel(linear_image,i); PixelTrait traits=GetPixelChannelTraits(linear_image,channel); PixelTrait paint_traits=GetPixelChannelTraits(paint_image,channel); if ((traits == UndefinedPixelTrait) || (paint_traits == UndefinedPixelTrait)) continue; if (((paint_traits & CopyPixelTrait) != 0) || (GetPixelReadMask(linear_image,p) == 0)) { SetPixelChannel(paint_image,channel,p[center+i],q); continue; } SetPixelChannel(paint_image,channel,p[j*GetPixelChannels(linear_image)+ i],q); } p+=GetPixelChannels(linear_image); q+=GetPixelChannels(paint_image); } if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse) status=MagickFalse; if (linear_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OilPaintImage) #endif proceed=SetImageProgress(linear_image,OilPaintImageTag,progress++, linear_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } paint_view=DestroyCacheView(paint_view); image_view=DestroyCacheView(image_view); histograms=DestroyHistogramThreadSet(histograms); linear_image=DestroyImage(linear_image); if (status == MagickFalse) paint_image=DestroyImage(paint_image); return(paint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p a q u e P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpaquePaintImage() 
changes any pixel that matches color with the color % defined by fill. % % By default color must match a particular pixel color exactly. However, in % many cases two colors may differ by a small amount. Fuzz defines how much % tolerance is acceptable to consider two colors as the same. For example, % set fuzz to 10 and the color red at intensities of 100 and 102 respectively % are now interpreted as the same color. % % The format of the OpaquePaintImage method is: % % MagickBooleanType OpaquePaintImage(Image *image, % const PixelInfo *target,const PixelInfo *fill, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o target: the RGB value of the target color. % % o fill: the replacement color. % % o invert: paint any pixel that does not match the target color. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType OpaquePaintImage(Image *image, const PixelInfo *target,const PixelInfo *fill,const MagickBooleanType invert, ExceptionInfo *exception) { #define OpaquePaintImageTag "Opaque/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(target != (PixelInfo *) NULL); assert(fill != (PixelInfo *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && (IsPixelInfoGray(fill) == MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace,exception); if ((fill->alpha_trait == BlendPixelTrait) && (image->alpha_trait != BlendPixelTrait)) (void) SetImageAlpha(image,OpaqueAlpha,exception); /* Make image color opaque. 
*/ status=MagickTrue; progress=0; GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register Quantum *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert) SetPixelInfoPixel(image,fill,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OpaquePaintImage) #endif proceed=SetImageProgress(image,OpaquePaintImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p a r e n t P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransparentPaintImage() changes the opacity value associated with any pixel % that matches color to the value defined by opacity. % % By default color must match a particular pixel color exactly. However, in % many cases two colors may differ by a small amount. Fuzz defines how much % tolerance is acceptable to consider two colors as the same. For example, % set fuzz to 10 and the color red at intensities of 100 and 102 respectively % are now interpreted as the same color. 
% % The format of the TransparentPaintImage method is: % % MagickBooleanType TransparentPaintImage(Image *image, % const PixelInfo *target,const Quantum opacity, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o target: the target color. % % o opacity: the replacement opacity value. % % o invert: paint any pixel that does not match the target color. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType TransparentPaintImage(Image *image, const PixelInfo *target,const Quantum opacity,const MagickBooleanType invert, ExceptionInfo *exception) { #define TransparentPaintImageTag "Transparent/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(target != (PixelInfo *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait != BlendPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); /* Make image color transparent. 
*/ status=MagickTrue; progress=0; GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register ssize_t x; register Quantum *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert) SetPixelAlpha(image,opacity,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransparentPaintImage) #endif proceed=SetImageProgress(image,TransparentPaintImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p a r e n t P a i n t I m a g e C h r o m a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransparentPaintImageChroma() changes the opacity value associated with any % pixel that matches color to the value defined by opacity. % % As there is one fuzz value for the all the channels, TransparentPaintImage() % is not suitable for the operations like chroma, where the tolerance for % similarity of two color component (RGB) can be different. 
Thus we define % this method to take two target pixels (one low and one high) and all the % pixels of an image which are lying between these two pixels are made % transparent. % % The format of the TransparentPaintImageChroma method is: % % MagickBooleanType TransparentPaintImageChroma(Image *image, % const PixelInfo *low,const PixelInfo *high,const Quantum opacity, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o low: the low target color. % % o high: the high target color. % % o opacity: the replacement opacity value. % % o invert: paint any pixel that does not match the target color. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image, const PixelInfo *low,const PixelInfo *high,const Quantum opacity, const MagickBooleanType invert,ExceptionInfo *exception) { #define TransparentPaintImageTag "Transparent/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(high != (PixelInfo *) NULL); assert(low != (PixelInfo *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait != BlendPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); /* Make image color transparent. 
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      match;

    PixelInfo
      pixel;

    register Quantum
      *restrict q;

    register ssize_t
      x;

    /* another row already failed; fall through without touching pixels */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      /*
        Per-channel range test: the pixel matches when each of its RGB
        components lies inside the closed [low,high] interval (alpha is
        not consulted; this is the chroma-key variant).
      */
      match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
        (pixel.green >= low->green) && (pixel.green <= high->green) &&
        (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ?  MagickTrue :
        MagickFalse;
      /* invert flips the sense: paint non-matching pixels instead */
      if (match != invert)
        SetPixelAlpha(image,opacity,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress callback is not thread-safe; serialize the update */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImageChroma)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
pr35738.c
/* PR c/35738 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* Compile-only regression test: each statement below adds a pointer-typed
   operand to an int inside an OpenMP atomic update.  Per the dg-warning
   directives, the compiler is expected to issue the ordinary "makes integer
   from pointer without a cast" diagnostic for every case rather than
   rejecting or mishandling the atomic construct.  */

void foo (void);

void
bar (void *p)
{
  int i = 0;
  char q[10];

  /* Array operand: q decays to char*.  */
#pragma omp atomic
  i += q;	/* { dg-warning "makes integer from pointer without a cast" } */
  /* Function designator: foo decays to a function pointer.  */
#pragma omp atomic
  i += foo;	/* { dg-warning "makes integer from pointer without a cast" } */
  /* Plain void* operand.  */
#pragma omp atomic
  i += p;	/* { dg-warning "makes integer from pointer without a cast" } */
}
hoImageRegDeformationFieldSolver.h
/** \file   hoImageRegDeformationFieldSolver.h
    \brief  Implement the PDE solver for deformation field non-linear image registration

    The PDE solver is a classical gradient descent method, derived from the calculus of variation:

    [1] Gerardo Hermosillo, Christophe Chefd'Hotel, Olivier Faugeras. Variational Methods for Multimodal Image Matching.
    International Journal of Computer Vision. December 2002, Volume 50, Issue 3, pp 329-343.
    http://link.springer.com/article/10.1023%2FA%3A1020830525823

    [2] Gerardo Hermosillo. Variational Methods for Multimodal Image Matching. PhD Thesis, UNIVERSITÉ DE NICE - SOPHIA ANTIPOLIS. May 2002.
    http://webdocs.cs.ualberta.ca/~dana/readingMedIm/papers/hermosilloPhD.pdf

    [3] Christophe Chefd'Hotel, Gerardo Hermosillo, Olivier D. Faugeras: Flows of diffeomorphisms for multimodal image registration. ISBI 2002: 753-756.
    http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=1029367&tag=1

    [4] Christophe Chefd'Hotel, Geometric Methods in Computer Vision and Image Processing : Contributions and Applications. PhD Thesis, April 2005.

    The code is based on the listed source code at page 185 - 187 in ref [2] and extended according to the ref [3] and [4].
    The divergence-free transformation using the Hodge decomposition theorem and FFT computation are listed at page 74 and 77 - 78 in ref [4].

    \author Hui Xue
*/

#ifndef hoImageRegDeformationFieldSolver_H_
#define hoImageRegDeformationFieldSolver_H_
#pragma once

#include "hoImageRegNonParametricSolver.h"
#include "hoImageRegDeformationField.h"
#include "hoNDFFT.h"

// undo any max/min macros (e.g. from windows.h) that would break std::numeric_limits<T>::max() below
#ifdef max
#undef max
#endif // max

#ifdef min
#undef min
#endif // min

namespace Gadgetron {

    /// Gradient-descent PDE solver that optimizes a pixel-wise deformation field
    /// to register a source image onto a target image (refs [2]-[4] above).
    /// ValueType: image pixel value type
    /// CoordType: transformation data type
    template<typename TargetType, typename SourceType, typename CoordType>
    class hoImageRegDeformationFieldSolver : public hoImageRegNonParametricSolver<TargetType, SourceType, CoordType>
    {
    public:

        typedef hoImageRegDeformationFieldSolver<TargetType, SourceType, CoordType> Self;
        typedef hoImageRegNonParametricSolver<TargetType, SourceType, CoordType> BaseClass;

        typedef typename TargetType::value_type ValueType;
        enum { D = TargetType::NDIM };
        enum { DIn = TargetType::NDIM };
        enum { DOut = SourceType::NDIM };

        typedef hoNDImage<ValueType, 2> Target2DType;
        typedef Target2DType Source2DType;

        typedef hoNDImage<ValueType, 3> Target3DType;
        // NOTE(review): Source3DType is typedef'd to Target2DType; presumably Target3DType was
        // intended — confirm before changing, since these aliases appear unused in this file.
        typedef Target2DType Source3DType;

        typedef ValueType T;
        typedef ValueType element_type;
        typedef ValueType value_type;

        typedef CoordType coord_type;

        /// complex deformation-field image types used by the FFT-based hodge decomposition
        typedef hoNDImage< std::complex<CoordType>, D> DeformCplxType;
        typedef hoNDImage< std::complex<float>, D> DeformFLTCplxType;

        typedef typename BaseClass::InterpolatorType InterpolatorType;

        typedef hoImageRegDeformationField<CoordType, D> TransformationType;
        typedef typename TransformationType::input_point_type input_point_type;
        typedef typename TransformationType::output_point_type output_point_type;
        typedef typename TransformationType::jacobian_position_type jacobian_position_type;
        typedef typename TransformationType::DeformationFieldType DeformationFieldType;

        typedef typename BaseClass::ImageRegWarperType ImageRegWarperType;
        typedef typename BaseClass::ImageRegDissimilarityType ImageRegDissimilarityType;

        hoImageRegDeformationFieldSolver();
        virtual ~hoImageRegDeformationFieldSolver();

        /// set the deformation-field transformation to be optimized (solver keeps a pointer; caller retains ownership)
        void setTransform(TransformationType& transform) { transform_ = &transform; }

        virtual bool initialize();

        virtual bool solve();

        /// perform one iteration of optimization
        virtual bool solve_once(TargetType* target, SourceType* source, TargetType& warped,
            unsigned int iter_num, unsigned int max_iter_num, unsigned int& divTimes,
            ValueType& curr_dissimilarity, ValueType& prev_dissimilarity,
            TransformationType* transform, ImageRegWarperType& warper, ImageRegDissimilarityType& dissimilarity,
            bool& stopIteration, TargetType* gradient_warpped, DeformationFieldType* deform_delta, DeformationFieldType* deform_updated,
            DeformationFieldType& deform_norm , DeformationFieldType& deform_norm_one_dim, CoordType* deform_delta_scale_factor);

        /// perform the hodge decomposition on the deformation field
        bool hodge_decomposition(DeformationFieldType* deform);

        /// print function
        virtual void print(std::ostream& os) const;

        /// the regularization method in ref [3] is used
        /// in the unit of pixel
        ValueType regularization_hilbert_strength_[D];

        /// whether the deformation can warp a point outside the FOV
        /// InFOV constraint
        bool apply_in_FOV_constraint_;

        /// whether to apply the divergence free constraint
        bool apply_divergence_free_constraint_;

        using BaseClass::iter_num_;
        using BaseClass::max_iter_num_;
        using BaseClass::dissimilarity_thres_;
        using BaseClass::parameter_thres_;
        using BaseClass::div_num_;
        using BaseClass::step_size_para_;
        using BaseClass::step_size_div_para_;
        using BaseClass::verbose_;
        using BaseClass::gt_timer1_;
        using BaseClass::gt_timer2_;
        using BaseClass::gt_timer3_;
        using BaseClass::performTiming_;
        using BaseClass::gt_exporter_;
        using BaseClass::debugFolder_;

    protected:

        /// non-owning pointer to the transformation set via setTransform()
        TransformationType* transform_;

        ValueType curr_dissimilarity_;
        ValueType prev_dissimilarity_;

        /// per-dimension working buffers, all sized like the target image
        DeformationFieldType deform_delta_[D];
        DeformationFieldType deform_updated_[D];
        DeformationFieldType deform_norm_;
        DeformationFieldType deform_norm_one_dim_;
        TargetType gradient_warpped_[D];

        /// complex buffers for the FFT-based divergence-free projection
        DeformCplxType deform_cplx_[D];
        DeformCplxType deform_fft_cplx_[D];
        DeformCplxType deform_fft_buf_cplx_[D];

        /// compensate for the non-isotropic pixel sizes
        coord_type deform_delta_scale_factor_[D];

        /// hodge decomposition assuming the deformation is expressed in pixel units
        bool hodge_decomposition_image_coordinate(DeformationFieldType* deform);

        using BaseClass::target_;
        using BaseClass::source_;
        using BaseClass::warpped_;
        using BaseClass::bg_value_;
        using BaseClass::interp_;
        using BaseClass::warper_;
        using BaseClass::dissimilarity_;
        using BaseClass::use_world_coordinate_;
    };

    template<typename TargetType, typename SourceType, typename CoordType>
    hoImageRegDeformationFieldSolver<TargetType, SourceType, CoordType>::hoImageRegDeformationFieldSolver() : BaseClass()
    {
        // defaults: moderate Gaussian regularization, isotropic scaling, no extra constraints
        for ( unsigned int ii=0; ii<D; ii++ )
        {
            regularization_hilbert_strength_[ii] = 12;
            deform_delta_scale_factor_[ii] = 1;
        }

        apply_in_FOV_constraint_ = false;
        apply_divergence_free_constraint_ = false;
    }

    template<typename TargetType, typename SourceType, typename CoordType>
    hoImageRegDeformationFieldSolver<TargetType, SourceType, CoordType>::~hoImageRegDeformationFieldSolver()
    {
    }

    template<typename TargetType, typename SourceType, typename CoordType>
    bool hoImageRegDeformationFieldSolver<TargetType, SourceType, CoordType>::initialize()
    {
        // base-class setup, then allocate all per-dimension working buffers with the target geometry
        GADGET_CHECK_RETURN_FALSE(BaseClass::initialize());

        warper_->setTransformation(*transform_);

        std::vector<size_t> dim;
        target_->get_dimensions(dim);   // NOTE(review): dim is computed but not used afterwards

        deform_norm_.copyImageInfo(*target_);
        deform_norm_one_dim_.copyImageInfo(*target_);

        unsigned int ii;
        for ( ii=0; ii<D; ii++ )
        {
            deform_delta_[ii].copyImageInfo(*target_);
            Gadgetron::clear(deform_delta_[ii]);

            deform_updated_[ii].copyImageInfo(*target_);
            Gadgetron::clear(deform_updated_[ii]);

            // complex FFT buffers are only needed when the divergence-free projection is on
            if (apply_divergence_free_constraint_)
            {
                deform_cplx_[ii].copyImageInfo(*target_);
                Gadgetron::clear(deform_cplx_[ii]);

                deform_fft_cplx_[ii].copyImageInfo(*target_);
                Gadgetron::clear(deform_fft_cplx_[ii]);

                deform_fft_buf_cplx_[ii].copyImageInfo(*target_);
                Gadgetron::clear(deform_fft_buf_cplx_[ii]);
            }

            gradient_warpped_[ii].copyImageInfo(*target_);
        }

        // scale factors normalize every dimension's step against dimension 0's pixel size
        deform_delta_scale_factor_[0] = 1;
        for ( ii=0; ii<D; ii++ )
        {
            deform_delta_scale_factor_[ii] = target_->get_pixel_size(0)/target_->get_pixel_size(ii);
        }

        return true;
    }

    /// One gradient-descent step: warp the source, evaluate the dissimilarity derivative,
    /// build the regularized update field, and fold it into the transformation.
    /// Returns false on failure; sets stopIteration once div_num_ step-size divisions are exhausted.
    template<typename TargetType, typename SourceType, typename CoordType>
    bool hoImageRegDeformationFieldSolver<TargetType, SourceType, CoordType>::
    solve_once(TargetType* target, SourceType* source, TargetType& warped, unsigned int iter_num, unsigned int max_iter_num, unsigned int& divTimes, ValueType& curr_dissimilarity, ValueType& prev_dissimilarity, TransformationType* transform, ImageRegWarperType& warper, ImageRegDissimilarityType& dissimilarity, bool& stopIteration, TargetType* gradient_warpped, DeformationFieldType* deform_delta, DeformationFieldType* deform_updated, DeformationFieldType& deform_norm , DeformationFieldType& deform_norm_one_dim, CoordType* deform_delta_scale_factor)
    {
        try
        {
            unsigned int ii;

            long long sx = (long long)(target_->get_size(0));
            long long sy = (long long)(target_->get_size(1));
            long long sz = (long long)(target_->get_size(2));

            long long x, y, z;

            if ( !debugFolder_.empty() )
            {
                for ( ii=0; ii<D; ii++ )
                {
                    std::ostringstream ostr;
                    ostr << "DeformationFieldSolver_deformfield_" << ii;

                    const DeformationFieldType& def = transform->getDeformationField(ii);
                    gt_exporter_.export_image(def, debugFolder_+ostr.str());
                }
            }

            // warp the source
            if ( use_world_coordinate_ )
            {
                GADGET_CHECK_RETURN_FALSE(warper.warpWithDeformationFieldWorldCoordinate(*target, *source, warped));
            }
            else
            {
                GADGET_CHECK_RETURN_FALSE(warper.warp(*target, *source, use_world_coordinate_, warped));
            }

            if ( !debugFolder_.empty() ) { gt_exporter_.export_image(warped, debugFolder_+"DeformationFieldSolver_warpped"); }

            // evaluate the dissimilarity and get the intensity comparison function
            GADGET_CHECK_RETURN_FALSE(dissimilarity.evaluateDeriv(warped));
            curr_dissimilarity = dissimilarity.getDissimilarity();

            if ( verbose_ )
            {
                GDEBUG_STREAM("--> Iteration " << iter_num << " [out of " << max_iter_num << "] : \t" << curr_dissimilarity);
            }

            // if the cost did not improve by at least dissimilarity_thres_, shrink the step size;
            // after div_num_ shrinks, stop the iteration
            if ( prev_dissimilarity < curr_dissimilarity + dissimilarity_thres_ )
            {
                if ( ++divTimes > div_num_ )
                {
                    stopIteration = true;
                    return true;
                }

                step_size_para_ *= step_size_div_para_;

                if ( verbose_ )
                {
                    GDEBUG_STREAM("----> Parameter division " << divTimes << " [out of " << div_num_ << "] ");
                }
            }

            prev_dissimilarity = curr_dissimilarity;

            /// gradient is in the 1/pixel unit
            Gadgetron::gradient(warped, gradient_warpped);

            // per-pixel update direction: image gradient weighted by the dissimilarity derivative
            const TargetType& deriv = dissimilarity.getDeriv();
            size_t N = deriv.get_number_of_elements();
            const ValueType* pD = deriv.begin();
            for ( ii=0; ii<D; ii++ )
            {
                ValueType* pG = gradient_warpped[ii].begin();
                CoordType* pR = deform_delta[ii].begin();

                for (size_t n = 0; n < N; n++)
                {
                    pR[n] = pG[n] * pD[n];
                }
                // Gadgetron::multiply(gradient_warpped[ii], deriv, deform_delta[ii]);
            }

            if ( !debugFolder_.empty() )
            {
                gt_exporter_.export_image(deriv, debugFolder_+"DeformationFieldSolver_deriv");

                for ( ii=0; ii<D; ii++ )
                {
                    std::ostringstream ostr;
                    ostr << "DeformationFieldSolver_gradient_warpped_" << ii;
                    gt_exporter_.export_image(gradient_warpped[ii], debugFolder_+ostr.str());

                    std::ostringstream ostr2;
                    ostr2 << "DeformationFieldSolver_deform_delta_" << ii;
                    gt_exporter_.export_image(deform_delta[ii], debugFolder_+ostr2.str());
                }
            }

            /// compensate for non-isotropic pixel sizes
            for ( ii=0; ii<D; ii++ )
            {
                if ( std::abs(deform_delta_scale_factor[ii]-1) > FLT_EPSILON )
                {
                    Gadgetron::scal(deform_delta_scale_factor[ii], deform_delta[ii]);
                }
            }

            /// filter sigma is in the unit of pixel size
            // Gaussian smoothing realizes the Hilbert-space regularization of ref [3]
            for ( ii=0; ii<D; ii++ )
            {
                Gadgetron::filterGaussian(deform_delta[ii], regularization_hilbert_strength_);
            }

            if ( !debugFolder_.empty() )
            {
                for ( ii=0; ii<D; ii++ )
                {
                    std::ostringstream ostr;
                    ostr << "DeformationFieldSolver_deform_delta_filtered_" << ii;
                    gt_exporter_.export_image(deform_delta[ii], debugFolder_+ostr.str());
                }
            }

            // compute the max norm of hilbert derivative
            Gadgetron::clear(deform_norm);
            for ( ii=0; ii<D; ii++ )
            {
                Gadgetron::multiply(deform_delta[ii], deform_delta[ii], deform_norm_one_dim);
                Gadgetron::add(deform_norm_one_dim, deform_norm, deform_norm);
            }

            CoordType* pDeformNorm = deform_norm.begin();
            ValueType max_norm_deform_delta = pDeformNorm[0];
            // size_t max_ind;
            // NOTE(review): this scan covers only sx*sy elements and uses an unsigned int
            // counter against a long long bound — for 3D images (sz > 1) only the first slice
            // is inspected, and very large images could overflow ii; confirm whether the 3D
            // max should run over sx*sy*sz.
            for ( ii=1; ii<sx*sy; ii++ )
            {
                if ( max_norm_deform_delta < pDeformNorm[ii] ) max_norm_deform_delta = pDeformNorm[ii];
            }
            // Gadgetron::maxAbsolute(deform_norm, max_norm_deform_delta, max_ind);

            // normalize the time step so the biggest per-pixel displacement is ~step_size_para_
            ValueType PDE_time_integration_step_size = 0;
            if ( max_norm_deform_delta > 1e-5 )
            {
                PDE_time_integration_step_size = step_size_para_ / std::sqrt(max_norm_deform_delta);
            }

            if ( PDE_time_integration_step_size > 0 )
            {
                for ( ii=0; ii<D; ii++ )
                {
                    Gadgetron::scal( (CoordType)(PDE_time_integration_step_size), deform_delta[ii]);
                }

                // compose the delta with the current deformation: deform_updated = delta + transform(x + delta)
                if ( use_world_coordinate_ )
                {
                    // Note: the deform_delta is in the unit of pixel so far, need to convert it to the world coordinate
                    if ( D == 2 )
                    {
                        CoordType ix, iy, wx, wy, pX, pY, deltaWX, deltaWY;
                        // #pragma omp parallel for default(none) private(y, x, ix, iy, wx, wy, pX, pY, deltaWX, deltaWY) shared(sx, sy, target, deform_delta, deform_updated, transform) num_threads(2)
                        for ( y=0; y<sy; y++ )
                        {
                            for ( x=0; x<sx; x++ )
                            {
                                size_t offset = x + y*sx;

                                target->image_to_world( (size_t)x, (size_t)y, wx, wy);

                                CoordType deltaX = deform_delta[0](offset);
                                CoordType deltaY = deform_delta[1](offset);

                                // because the delta deformation is in the pixel size unit, it needs to be converted to world coordinate
                                target->image_to_world( deltaX, deltaY, deltaWX, deltaWY);

                                target->world_to_image(wx+deltaWX, wy+deltaWY, ix, iy);

                                transform->get(ix, iy, pX, pY);

                                deform_updated[0](offset) = deltaWX + pX;
                                deform_updated[1](offset) = deltaWY + pY;
                            }
                        }
                    }
                    else if ( D == 3 )
                    {
                        CoordType ix, iy, iz, wx, wy, wz, pX, pY, pZ, deltaWX, deltaWY, deltaWZ;
                        #pragma omp parallel for default(none) private(y, x, z, ix, iy, iz, wx, wy, wz, pX, pY, pZ, deltaWX, deltaWY, deltaWZ) shared(sx, sy, sz, target, deform_delta, deform_updated, transform)
                        for ( z=0; z<sz; z++ )
                        {
                            for ( y=0; y<sy; y++ )
                            {
                                for ( x=0; x<sx; x++ )
                                {
                                    size_t offset = x + y*sx + z*sx*sy;

                                    target->image_to_world( (size_t)x, (size_t)y, (size_t)z, wx, wy, wz);

                                    CoordType deltaX = deform_delta[0](offset);
                                    CoordType deltaY = deform_delta[1](offset);
                                    CoordType deltaZ = deform_delta[2](offset);

                                    target->image_to_world( deltaX, deltaY, deltaZ, deltaWX, deltaWY, deltaWZ);

                                    target->world_to_image(wx+deltaWX, wy+deltaWY, wz+deltaWZ, ix, iy, iz);

                                    transform->get(ix, iy, iz, pX, pY, pZ);

                                    deform_updated[0](offset) = deltaWX + pX;
                                    deform_updated[1](offset) = deltaWY + pY;
                                    deform_updated[2](offset) = deltaWZ + pZ;
                                }
                            }
                        }
                    }
                    else
                    {
                        // generic N-D path, same composition with array-based coordinates
                        size_t N = target_->get_number_of_elements();
                        long long n;

                        #pragma omp parallel default(none) private(n, ii) shared(N, target, deform_delta, deform_updated, transform)
                        {
                            size_t ind[D];
                            CoordType pos[D];
                            CoordType pDelta[D];
                            CoordType pDeltaWorld[D];
                            CoordType indDeform[D];
                            CoordType pDeform[D];

                            #pragma omp for
                            for ( n=0; n<(long long)N; n++ )
                            {
                                deform_delta[0].calculate_index(n, ind);

                                target->image_to_world( ind, pos);

                                for ( ii=0; ii<D; ii++ )
                                {
                                    pDelta[ii] = deform_delta[ii](n);
                                }

                                target->image_to_world( pDelta, pDeltaWorld);

                                for ( ii=0; ii<D; ii++ )
                                {
                                    pDeltaWorld[ii] += pos[ii];
                                }

                                target->world_to_image(pDeltaWorld, indDeform);

                                transform->get(indDeform, pDeform);

                                for ( ii=0; ii<D; ii++ )
                                {
                                    deform_updated[ii](n) = pDeltaWorld[ii] + pDeform[ii];
                                }
                            }
                        }
                    }
                }
                else
                {
                    // pixel-coordinate path: no world conversion needed
                    if ( D == 2 )
                    {
                        CoordType pX, pY;
                        // #pragma omp parallel for default(none) private(y, x, pX, pY) shared(sx, sy, deform_delta, deform_updated, transform) num_threads(2)
                        for ( y=0; y<sy; y++ )
                        {
                            for ( x=0; x<sx; x++ )
                            {
                                size_t offset = x + y*sx;

                                CoordType deltaX = deform_delta[0](offset);
                                CoordType deltaY = deform_delta[1](offset);

                                transform->get(x+deltaX, y+deltaY, pX, pY);

                                deform_updated[0](offset) = deltaX + pX;
                                deform_updated[1](offset) = deltaY + pY;
                            }
                        }
                    }
                    else if ( D == 3 )
                    {
                        CoordType pX, pY, pZ;
                        #pragma omp parallel for default(none) private(y, x, z, pX, pY, pZ) shared(sx, sy, sz, deform_delta, deform_updated, transform)
                        for ( z=0; z<sz; z++ )
                        {
                            for ( y=0; y<sy; y++ )
                            {
                                for ( x=0; x<sx; x++ )
                                {
                                    size_t offset = x + y*sx + z*sx*sy;

                                    CoordType deltaX = deform_delta[0](offset);
                                    CoordType deltaY = deform_delta[1](offset);
                                    CoordType deltaZ = deform_delta[2](offset);

                                    transform->get(x+deltaX, y+deltaY, z+deltaZ, pX, pY, pZ);

                                    deform_updated[0](offset) = deltaX + pX;
                                    deform_updated[1](offset) = deltaY + pY;
                                    deform_updated[2](offset) = deltaZ + pZ;
                                }
                            }
                        }
                    }
                    else
                    {
                        size_t N = target_->get_number_of_elements();
                        long long n;

                        #pragma omp parallel default(none) private(n, ii) shared(N, deform_delta, deform_updated, transform)
                        {
                            size_t ind[D];
                            CoordType pDelta[D];
                            CoordType indDeform[D];
                            CoordType pDeform[D];

                            #pragma omp for
                            for ( n=0; n<(long long)N; n++ )
                            {
                                deform_delta[0].calculate_index(n, ind);

                                for ( ii=0; ii<D; ii++ )
                                {
                                    pDelta[ii] = deform_delta[ii](n);
                                    indDeform[ii] = ind[ii] + pDelta[ii];
                                }

                                transform->get(indDeform, pDeform);

                                for ( ii=0; ii<D; ii++ )
                                {
                                    deform_updated[ii](n) = pDelta[ii] + pDeform[ii];
                                }
                            }
                        }
                    }
                }

                if ( !debugFolder_.empty() )
                {
                    for ( ii=0; ii<D; ii++ )
                    {
                        std::ostringstream ostr;
                        ostr << "DeformationFieldSolver_deform_updated_" << ii;
                        gt_exporter_.export_image(deform_updated[ii], debugFolder_+ostr.str());
                    }
                }

                // add the divergence constraint
                if ( apply_divergence_free_constraint_ )
                {
                    GADGET_CHECK_RETURN_FALSE(this->hodge_decomposition(deform_updated));
                }

                // add the InFOV constraint
                // NOTE(review): the clamp below is implemented only for the 2D pixel-coordinate
                // case; 3D and world-coordinate paths are silently unconstrained — confirm intended.
                if ( apply_in_FOV_constraint_ )
                {
                    if ( !use_world_coordinate_ )
                    {
                        if ( D == 2 )
                        {
                            CoordType pX, pY;
                            // #pragma omp parallel for default(none) private(y, x, pX, pY) shared(sx, sy, deform_updated) num_threads(2)
                            for ( y=0; y<sy; y++ )
                            {
                                for ( x=0; x<sx; x++ )
                                {
                                    size_t offset = x + y*sx;

                                    CoordType tx = x + deform_updated[0](offset);
                                    CoordType ty = y + deform_updated[1](offset);

                                    if ( tx < 0 )
                                    {
                                        deform_updated[0](offset) = FLT_EPSILON - x;
                                    }
                                    else if (tx > sx-1 )
                                    {
                                        deform_updated[0](offset) = sx-1-FLT_EPSILON - x;
                                    }

                                    if ( ty < 0 )
                                    {
                                        deform_updated[1](offset) = FLT_EPSILON - y;
                                    }
                                    else if (ty > sy-1 )
                                    {
                                        deform_updated[1](offset) = sy-1-FLT_EPSILON - y;
                                    }
                                }
                            }
                        }
                    }
                }

                // commit the composed deformation into the transformation
                for ( ii=0; ii<D; ii++ )
                {
                    transform->setDeformationField(deform_updated[ii], ii);
                }
            }
        }
        catch(...)
        {
            return false;
        }

        return true;
    }

    /// Main optimization loop: repeatedly calls solve_once until stopIteration or max_iter_num_.
    template<typename TargetType, typename SourceType, typename CoordType>
    bool hoImageRegDeformationFieldSolver<TargetType, SourceType, CoordType>::solve()
    {
        try
        {
            GADGET_CHECK_RETURN_FALSE(this->initialize());

            prev_dissimilarity_ = std::numeric_limits<ValueType>::max();

            unsigned int divTimes = 0;

            dissimilarity_->initialize(*target_);

            if ( !debugFolder_.empty() )
            {
                gt_exporter_.export_image(*target_, debugFolder_+"DeformationFieldSolver_target");
                gt_exporter_.export_image(*source_, debugFolder_+"DeformationFieldSolver_source");
            }

            bool stopIteration = false;

            if ( verbose_ ) { GDEBUG_STREAM("--> DeformationFieldSolver ... "); }

            for ( iter_num_=0; iter_num_<max_iter_num_; iter_num_++ )
            {
                GADGET_CHECK_RETURN_FALSE( this->solve_once(target_, source_, warpped_, iter_num_, max_iter_num_, divTimes, curr_dissimilarity_, prev_dissimilarity_, transform_, *warper_, *dissimilarity_, stopIteration, gradient_warpped_, deform_delta_, deform_updated_, deform_norm_ , deform_norm_one_dim_, deform_delta_scale_factor_) );

                if ( stopIteration ) break;
            }

            if ( verbose_ ) { GDEBUG_STREAM("----> Total iteration number : " << iter_num_); }
        }
        catch(...)
        {
            GERROR_STREAM("Errors happened in hoImageRegDeformationFieldSolver<TargetType, SourceType, CoordType>::solve() ... ");
            return false;
        }

        return true;
    }

    /// Hodge decomposition entry point: if the field is in world units, rescale to pixel
    /// units first (divide by pixel size), run the image-coordinate decomposition, and
    /// scale back afterwards.
    template<typename TargetType, typename SourceType, typename CoordType>
    bool hoImageRegDeformationFieldSolver<TargetType, SourceType, CoordType>::hodge_decomposition(DeformationFieldType* deform)
    {
        try
        {
            size_t d;

            if (use_world_coordinate_)
            {
                // the deformation field is in the world coordinate
                size_t N = deform[0].get_number_of_elements();

                for (d = 0; d < D; d++)
                {
                    ValueType delta = 1/deform[0].get_pixel_size(d);
                    CoordType* pDeform = deform[d].begin();
                    for (size_t n = 0; n < N; n++)
                    {
                        pDeform[n] *= delta;
                    }
                }

                GADGET_CHECK_RETURN_FALSE(hodge_decomposition_image_coordinate(deform));

                // restore world units
                for (d = 0; d < D; d++)
                {
                    ValueType delta = deform[0].get_pixel_size(d);
                    CoordType* pDeform = deform[d].begin();
                    for (size_t n = 0; n < N; n++)
                    {
                        pDeform[n] *= delta;
                    }
                }
            }
            else
            {
                // the deformation field is in the pixel coordinate
                GADGET_CHECK_RETURN_FALSE(hodge_decomposition_image_coordinate(deform));
            }
        }
        catch (...)
        {
            GERROR_STREAM("Errors happened in hoImageRegDeformationFieldSolver<TargetType, SourceType, CoordType>::hodge_decomposition(...) ... ");
            return false;
        }

        return true;
    }

    /// FFT-based divergence-free projection of a pixel-unit deformation field
    /// (ref [4], page 74 and 77-78): FFT each component, subtract the curl-free
    /// part f*(f.v)/|f|^2 at every non-DC frequency, then inverse FFT.
    template<typename TargetType, typename SourceType, typename CoordType>
    bool hoImageRegDeformationFieldSolver<TargetType, SourceType, CoordType>::hodge_decomposition_image_coordinate(DeformationFieldType* deform)
    {
        try
        {
            size_t d;

            long long sx = (long long)(deform[0].get_size(0));
            long long sy = (long long)(deform[0].get_size(1));
            long long sz = (long long)(deform[0].get_size(2));

            long long x, y, z;

            // the deformation field is in the unit of pixel
            // forward FFT of every deformation component
            for (d = 0; d < D; d++)
            {
                Gadgetron::real_to_complex(deform[d], deform_cplx_[d]);
                if (!debugFolder_.empty())
                {
                    std::ostringstream ostr;
                    ostr << "deform_cplx_" << d;
                    gt_exporter_.export_array_complex(deform_cplx_[d], debugFolder_ + ostr.str());
                }

                DeformFLTCplxType deform_flt, deform_fft_flt;
                deform_flt.copyFrom(deform_cplx_[d]);

                if (D == 2)
                {
                    Gadgetron::hoNDFFT<float>::instance()->fft2(deform_flt, deform_fft_flt);
                    deform_fft_cplx_[d].copyFrom(deform_fft_flt);
                }
                else if (D == 3)
                {
                    Gadgetron::hoNDFFT<float>::instance()->fft3(deform_flt, deform_fft_flt);
                    deform_fft_cplx_[d].copyFrom(deform_fft_flt);
                }
                else
                {
                    // generic N-D: 1D FFT along every dimension in turn
                    for (size_t d2 = 0; d2 < D; d2++)
                    {
                        Gadgetron::hoNDFFT<CoordType>::instance()->fft( &deform_cplx_[d], d2);
                    }

                    deform_fft_cplx_[d] = deform_cplx_[d];
                }

                if (!debugFolder_.empty())
                {
                    std::ostringstream ostr;
                    ostr << "deform_fft_cplx_" << d;
                    gt_exporter_.export_array_complex(deform_fft_cplx_[d], debugFolder_ + ostr.str());
                }
            }

            // ref [4], page 78, first equation, computing the divergence free field
            // e.g. the discrete fourier transform is exp(-j*2*pi*(kx*x/sx + ky*y/sy))
            if (D == 2)
            {
                CoordType dx = (CoordType)( 2 * M_PI / sx );
                CoordType dy = (CoordType)( 2 * M_PI / sy );

                for (y = 0; y < sy; y++)
                {
                    // wrap to signed frequency index
                    CoordType ky = (y < sy/2) ? y : y - sy;
                    CoordType fy = ky * dy;

                    for (x = 0; x < sx; x++)
                    {
                        size_t offset = x + y*sx;

                        CoordType kx = (x < sx / 2) ? x : x - sx;
                        CoordType fx = kx * dx;

                        std::complex<CoordType> vx = deform_fft_cplx_[0](offset);
                        std::complex<CoordType> vy = deform_fft_cplx_[1](offset);

                        if ( (x!=0) || (y!=0))
                        {
                            // v - f * (f.v)/|f|^2 : remove the curl-free component
                            std::complex<CoordType> s1 = fx * vx + fy * vy;
                            std::complex<CoordType> s2 = fx * fx + fy * fy;

                            std::complex<CoordType> s3 = s1 / s2;

                            deform_fft_buf_cplx_[0](offset) = vx - fx*s3;
                            deform_fft_buf_cplx_[1](offset) = vy - fy*s3;
                        }
                        else
                        {
                            // DC component is kept unchanged
                            deform_fft_buf_cplx_[0](offset) = vx;
                            deform_fft_buf_cplx_[1](offset) = vy;
                        }
                    }
                }
            }
            else if (D == 3)
            {
                CoordType dx = (CoordType)(2 * M_PI / sx);
                CoordType dy = (CoordType)(2 * M_PI / sy);
                CoordType dz = (CoordType)(2 * M_PI / sz);

                #pragma omp parallel for private(z, y, x) shared(sx, sy, sz)
                for (z = 0; z < sz; z++)
                {
                    CoordType kz = z;
                    if (z>=sz/2) kz = z - sz;
                    CoordType fz = kz * dz;

                    for (y = 0; y < sy; y++)
                    {
                        CoordType ky = y;
                        if (y >= sy / 2) ky = y - sy;
                        CoordType fy = ky * dy;

                        for (x = 0; x < sx; x++)
                        {
                            size_t offset = x + y*sx + z*sx*sy;

                            CoordType kx = x;
                            if (x >= sx / 2) kx = x - sx;
                            CoordType fx = kx * dx;

                            std::complex<CoordType> vx = deform_fft_cplx_[0](offset);
                            std::complex<CoordType> vy = deform_fft_cplx_[1](offset);
                            std::complex<CoordType> vz = deform_fft_cplx_[2](offset);

                            if ((x != 0) || (y != 0) || (z != 0))
                            {
                                std::complex<CoordType> s1 = fx * vx + fy * vy + fz * vz;
                                std::complex<CoordType> s2 = fx * fx + fy * fy + fz * fz;

                                std::complex<CoordType> s3 = s1 / s2;

                                deform_fft_buf_cplx_[0](offset) = vx - fx*s3;
                                deform_fft_buf_cplx_[1](offset) = vy - fy*s3;
                                deform_fft_buf_cplx_[2](offset) = vz - fz*s3;
                            }
                            else
                            {
                                deform_fft_buf_cplx_[0](offset) = vx;
                                deform_fft_buf_cplx_[1](offset) = vy;
                                deform_fft_buf_cplx_[2](offset) = vz;
                            }
                        }
                    }
                }
            }
            else
            {
                // generic N-D projection
                size_t N = deform[0].get_number_of_elements();
                long long n;

                CoordType dd[D];
                for (size_t ii = 0; ii<D; ii++)
                {
                    dd[ii] = (CoordType)(2 * M_PI / deform[0].get_size(ii));
                }

                #pragma omp parallel default(none) private(n) shared(dd, N)
                {
                    size_t ind[D];
                    CoordType kk[D];
                    CoordType ff[D];
                    std::complex<CoordType> vv[D];

                    #pragma omp for
                    for (n = 0; n<(long long)N; n++)
                    {
                        deform_fft_cplx_[0].calculate_index(n, ind);

                        size_t ii;
                        for (ii = 0; ii<D; ii++)
                        {
                            kk[ii] = ind[ii];
                            if (kk[ii] >= deform_fft_cplx_[0].get_size(ii) / 2) kk[ii] = ind[ii] - deform_fft_cplx_[0].get_size(ii);

                            ff[ii] = kk[ii] * dd[ii];
                            vv[ii] = deform_fft_cplx_[ii]( (size_t)n );
                        }

                        std::complex<CoordType> s1(0), s2(0);
                        for (ii = 0; ii<D; ii++)
                        {
                            s1 += ff[ii] * vv[ii];
                            s2 += ff[ii] * ff[ii];
                        }

                        if (s2.real()>0)
                        {
                            std::complex<CoordType> s3 = s1 / s2;
                            for (ii = 0; ii<D; ii++)
                            {
                                deform_fft_buf_cplx_[ii](n) = vv[ii] - ff[ii] * s3;
                            }
                        }
                        else
                        {
                            for (ii = 0; ii<D; ii++)
                            {
                                deform_fft_buf_cplx_[ii](n) = vv[ii];
                            }
                        }
                    }
                }
            }

            // inverse FFT back to the spatial domain and write into deform
            for (d = 0; d < D; d++)
            {
                if (!debugFolder_.empty())
                {
                    std::ostringstream ostr;
                    ostr << "deform_fft_buf_cplx_" << d;
                    gt_exporter_.export_array_complex(deform_fft_buf_cplx_[d], debugFolder_ + ostr.str());
                }

                DeformFLTCplxType deform_flt, deform_fft_flt;
                deform_fft_flt.copyFrom(deform_fft_buf_cplx_[d]);

                if (D == 2)
                {
                    Gadgetron::hoNDFFT<float>::instance()->ifft2(deform_fft_flt, deform_flt);
                    deform_cplx_[d].copyFrom(deform_flt);
                    Gadgetron::complex_to_real(deform_cplx_[d], deform[d]);

                    if (!debugFolder_.empty())
                    {
                        std::ostringstream ostr;
                        ostr << "deform_cplx_hodge_" << d;
                        gt_exporter_.export_array_complex(deform_cplx_[d], debugFolder_ + ostr.str());
                    }
                }
                else if (D == 3)
                {
                    Gadgetron::hoNDFFT<float>::instance()->ifft3(deform_fft_flt, deform_flt);
                    deform_cplx_[d].copyFrom(deform_flt);
                    Gadgetron::complex_to_real(deform_cplx_[d], deform[d]);

                    if (!debugFolder_.empty())
                    {
                        std::ostringstream ostr;
                        ostr << "deform_cplx_hodge_" << d;
                        gt_exporter_.export_array_complex(deform_cplx_[d], debugFolder_ + ostr.str());
                    }
                }
                else
                {
                    // NOTE(review): complex_to_real is called inside the per-dimension ifft loop,
                    // i.e. before all 1D inverse FFTs have been applied; only the final pass
                    // leaves the correct result — confirm whether it should be moved after the loop.
                    for (size_t d2 = 0; d2 < D; d2++)
                    {
                        Gadgetron::hoNDFFT<CoordType>::instance()->ifft( &deform_fft_buf_cplx_[d], d2);
                        Gadgetron::complex_to_real(deform_fft_buf_cplx_[d], deform[d]);
                    }

                    if (!debugFolder_.empty())
                    {
                        std::ostringstream ostr;
                        ostr << "deform_cplx_hodge_" << d;
                        gt_exporter_.export_array_complex(deform_fft_buf_cplx_[d], debugFolder_ + ostr.str());
                    }
                }
            }
        }
        catch (...)
        {
            GERROR_STREAM("Errors happened in hoImageRegDeformationFieldSolver<TargetType, SourceType, CoordType>::hodge_decomposition_image_coordinate(...) ... ");
            return false;
        }

        return true;
    }

    /// Print the solver configuration to the given stream.
    /// NOTE(review): "Gagdgetron" in the banner string is a typo, but the string is runtime
    /// output and is left byte-identical here.
    template<typename TargetType, typename SourceType, typename CoordType>
    void hoImageRegDeformationFieldSolver<TargetType, SourceType, CoordType>::print(std::ostream& os) const
    {
        using namespace std;
        os << "--------------Gagdgetron image registration non-parametric solver for pixel-wise deformation field -------------" << endl;
        os << "Image dimension is : " << D << endl;
        os << "Image data type is : " << std::string(typeid(ValueType).name()) << std::endl;
        os << "Transformation data type is : " << std::string(typeid(CoordType).name()) << std::endl;
        os << "Use world coordinate is : " << use_world_coordinate_ << std::endl;
        os << "Maximal iteration number is : " << max_iter_num_ << std::endl;
        os << "Dissimilarity threshold is : " << dissimilarity_thres_ << std::endl;
        os << "Parameter threshold is : " << parameter_thres_ << std::endl;
        os << "Number of search size division is : " << div_num_ << std::endl;
        os << "Solver step size is : " << step_size_para_ << std::endl;
        os << "Step size division ratio is : " << step_size_div_para_ << std::endl;
    }

}

#endif // hoImageRegDeformationFieldSolver_H_
omp_nested.c
/* Test if the compiler support nested parallelism By Chunhua Liao, University of Houston Oct. 2005 */ #include <stdio.h> #include "omp.h" #include "omp_testsuite.h" int check_omp_nested( FILE *logFile) { int counter =0 ; #ifdef _OPENMP omp_set_nested(1); #endif #pragma omp parallel shared(counter) { #pragma omp critical counter ++; #pragma omp parallel { #pragma omp critical counter --; } } return (counter!=0); } int crosscheck_omp_nested( FILE *logFile) { int counter =0 ; #ifdef _OPENMP omp_set_nested(0); #endif #pragma omp parallel shared(counter) { #pragma omp critical counter ++; #pragma omp parallel { #pragma omp critical counter --; } } return (counter!=0); }
LossSoftmaxCrossEntropy.h
// --------------------------------------------------------------------------
//  Binary Brain  -- binary neural net framework
//
//                                Copyright (C) 2018-2019 by Ryuji Fuchikami
//                                https://github.com/ryuz
//                                ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------


#pragma once

#include <vector>
#include <valarray>

#include "bb/LossFunction.h"


namespace bb {


// Softmax + cross-entropy loss.  Accumulates the loss over successive
// CalculateLoss() calls; GetLoss() reports the running average per frame.
template <typename T = float>
class LossSoftmaxCrossEntropy : public LossFunction
{
    using _super = LossFunction;

public:
    static inline std::string LossFunctionName(void) { return "LossSoftmaxCrossEntropy"; }
    static inline std::string ObjectName(void){ return LossFunctionName() + "_" + DataType<T>::Name(); }

    std::string GetLossFunctionName(void) const override { return LossFunctionName(); }
    std::string GetObjectName(void) const override { return ObjectName(); }

protected:
    // per-frame loss accumulation buffer (double for numeric stability)
    Tensor_<double> m_loss_buf;
    // running (negated log-likelihood) loss sum across calls
    double          m_loss;
    // running weight/frame count used as the denominator in GetLoss()
    double          m_frame_count = 0;

protected:
    // protected: instances are created via Create()
    LossSoftmaxCrossEntropy() {
        //      m_loss.Resize(1);
        Clear();
    }

public:
    ~LossSoftmaxCrossEntropy() {}

    /// Factory; the constructor is protected.
    static std::shared_ptr<LossSoftmaxCrossEntropy> Create(void)
    {
        auto self = std::shared_ptr<LossSoftmaxCrossEntropy>(new LossSoftmaxCrossEntropy);
        return self;
    }

    /// Reset the running loss accumulators.
    void Clear(void)
    {
        m_loss = 0;
        m_frame_count = 0;
    }

    /// Average loss per accumulated frame/weight; 0 when nothing accumulated yet.
    double GetLoss(void) const
    {
        if ( m_frame_count == 0 ) { return 0; }
        return m_loss / m_frame_count;
    }

    /// Compute softmax cross-entropy loss for the batch and return the gradient dy.
    /// y_buf: network output; t_buf: targets (summed per pixel as a weight — one-hot
    /// targets give weight 1); batch_size: used only by the disabled CUDA path below.
    FrameBuffer CalculateLoss(FrameBuffer y_buf, FrameBuffer t_buf, index_t batch_size)
    {
        BB_ASSERT(y_buf.GetType() == DataType<T>::type);
        BB_ASSERT(t_buf.GetType() == DataType<T>::type);
        BB_ASSERT(y_buf.GetNodeSize()  == t_buf.GetNodeSize());
        BB_ASSERT(y_buf.GetFrameSize() == t_buf.GetFrameSize());

        FrameBuffer dy_buf(y_buf.GetFrameSize(), y_buf.GetShape(), y_buf.GetType());

        m_loss_buf.Resize(y_buf.GetFrameSize());

        index_t frame_size  = t_buf.GetFrameSize();
        index_t node_size   = t_buf.GetNodeSize();
        auto shape          = t_buf.GetShape();
        auto ch_size        = shape[0];         // channel count is the first shape entry
        auto pix_size       = node_size / ch_size;

#ifdef BB_WITH_CUDA
        // GPU fast path: only for fp32, single-pixel layout, with device memory available
        if ( DataType<T>::type == BB_TYPE_FP32 && pix_size == 1 && y_buf.IsDeviceAvailable() && dy_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
            T t_sum = (T)t_buf.Sum();

            {
                auto y_ptr        = y_buf.LockDeviceMemoryConst();
                auto t_ptr        = t_buf.LockDeviceMemoryConst();
                auto dy_ptr       = dy_buf.LockDeviceMemory(true);
                auto loss_buf_ptr = m_loss_buf.LockDeviceMemory(true);

                bbcu_LossSoftmaxCrossEntropy<T>
                    (
                        (T  const   *)y_ptr.GetAddr(),
                        (T  const   *)t_ptr.GetAddr(),
                        (T          *)dy_ptr.GetAddr(),
                        (double     *)loss_buf_ptr.GetAddr(),
                        (T           )t_sum,
                        (int         )pix_size,
                        (int         )ch_size,
                        (int         )y_buf.GetFrameSize(),
                        (int         )(y_buf.GetFrameStride() / sizeof(float))
                    );
            }

            m_loss        += -m_loss_buf.Sum();
            m_frame_count += t_sum;

            return dy_buf;
        }
#endif
#if 0
        // older CUDA path kept for reference (disabled)
        if ( DataType<T>::type == BB_TYPE_FP32 && pix_size == 1 && y_buf.IsDeviceAvailable() && dy_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
            auto y_ptr        = y_buf.LockDeviceMemoryConst();
            auto t_ptr        = t_buf.LockDeviceMemoryConst();
            auto dy_ptr       = dy_buf.LockDeviceMemory(true);
            auto loss_buf_ptr = m_loss_buf.LockDeviceMemory(true);
            auto loss_ptr     = m_loss.LockDeviceMemory();

            bbcu_fp32_LossSoftmaxCrossEntropy
                (
                    (float const *)y_ptr.GetAddr(),
                    (float const *)t_ptr.GetAddr(),
                    (float       *)dy_ptr.GetAddr(),
                    (double      *)loss_buf_ptr.GetAddr(),
                    (double      *)loss_ptr.GetAddr(),
                    (int          )y_buf.GetNodeSize(),
                    (int          )y_buf.GetFrameSize(),
                    (int          )(y_buf.GetFrameStride() / sizeof(float)),
                    (int          )batch_size
                );

            m_frame_count += y_buf.GetFrameSize();

            return dy_buf;
        }
#endif

        // CPU reference path
        {
            T eps = (T)1.0e-7;

            m_loss_buf = 0;

            auto y_ptr        = y_buf.LockConst<T>();
            auto t_ptr        = t_buf.LockConst<T>();
            auto dy_ptr       = dy_buf.Lock<T>(true);
            auto loss_buf_ptr = m_loss_buf.Lock(true);

            // total target weight over the whole batch (gradient normalizer)
            T t_sum = 0;
            for (index_t frame = 0; frame < frame_size; ++frame) {
                for (index_t node = 0; node < node_size; ++node) {
                    t_sum += t_ptr.Get(frame, node);
                }
            }

            #pragma omp parallel for
            for (index_t frame = 0; frame < frame_size; ++frame) {
                for (index_t pix = 0; pix < pix_size; ++pix) {
                    // max over channels (subtracted for numerically stable softmax)
                    T c = std::numeric_limits<T>::lowest();
                    for (index_t ch = 0; ch < ch_size; ++ch) {
                        auto node = ch * pix_size + pix;
                        c = std::max(c, y_ptr.Get(frame, node));
                    }
    //              if (!Real_IsValid(c)) {
    ////                std::cout << "loss c : nan" << std::endl;
    //                  c = 0;
    //              }

                    // sum(exp(y - c))
                    T y_sum = 0;
                    T t_max = 0;
                    for (index_t ch = 0; ch < ch_size; ++ch) {
                        auto node = ch * pix_size + pix;
                        y_sum += std::exp(y_ptr.Get(frame, node) - c);
                        t_max += t_ptr.Get(frame, node);  // targets are one-hot, so summing yields this channel's weight
                    }

                    // avoid division by values at or below zero
                    if (y_sum <= eps) { y_sum = eps; }

                    for (index_t ch = 0; ch < ch_size; ++ch) {
                        auto node = ch * pix_size + pix;
                        T y = y_ptr.Get(frame, node);
                        T t = t_ptr.Get(frame, node);
                        T softmax = std::exp(y - c) / y_sum;
                        if ( t > 0) {
                            // weighted log-likelihood; negated at accumulation below
                            loss_buf_ptr[frame] += std::log(softmax + eps)*t_max;
    //                      t = (T)1.0;
                        }
                        // gradient of softmax cross-entropy, normalized by total target weight
                        T dy = (t_max * softmax - t) / (T)t_sum;
    //                  if (!Real_IsValid(dy)) {
    ////                    std::cout << "loss dy : nan" << std::endl;
    //                      dy = 0;
    //                  }

                        dy_ptr.Set(frame, node, dy);
                    }
                }
            }

            double loss_sum = 0;
            for ( index_t frame = 0; frame < frame_size; ++frame ) {
                loss_sum += loss_buf_ptr[frame];
            }

            m_loss        += -loss_sum;
            m_frame_count += t_sum;

            return dy_buf;
        }
    }
};


}
GB_emult_02_template.c
//------------------------------------------------------------------------------
// GB_emult_02_template: C = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// C is sparse, with the same sparsity structure as A. No mask is present, or
// M is bitmap/full. A is sparse/hyper, and B is bitmap/full. This method
// also handles the case when the original input A is bitmap/full and B is
// sparse/hyper, by computing B.*A with the operator flipped.

// NOTE(review): this file is a template, not a standalone translation unit.
// The types (GB_ATYPE, GB_BTYPE, GB_CTYPE), the accessors (GB_GETA, GB_GETB,
// GB_CX) and the operator (GB_BINOP) are #define'd by the file that #include's
// this template.  GB_FLIPPED selects the "B.*A with flipped operator" case,
// and GB_ISO_EMULT selects an iso-valued result, in which case no numerical
// work is done here (only the pattern Ci is built).

{

    //--------------------------------------------------------------------------
    // get A, B, and C
    //--------------------------------------------------------------------------

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;
    const int64_t *restrict Ai = A->i ;
    const int64_t vlen = A->vlen ;

    const int8_t *restrict Bb = B->b ;

    // The entries of A have been pre-sliced into A_ntasks tasks; the three
    // arrays below are consecutive sections of the single A_ek_slicing array
    // (first vector, last vector, and entry start offset, per task).
    // Presumably produced by GB_ek_slice — confirm against the caller.
    const int64_t *restrict kfirst_Aslice = A_ek_slicing ;
    const int64_t *restrict klast_Aslice  = A_ek_slicing + A_ntasks ;
    const int64_t *restrict pstart_Aslice = A_ek_slicing + A_ntasks * 2 ;

    const bool A_iso = A->iso ;
    const bool B_iso = B->iso ;

    #ifdef GB_ISO_EMULT
    ASSERT (C->iso) ;
    #else
    ASSERT (!C->iso) ;
    ASSERT (!(A_iso && B_iso)) ;    // one of A or B can be iso, but not both
    #if GB_FLIPPED
    // inputs were swapped by the caller: "A" here is the original B
    const GB_BTYPE *restrict Ax = (GB_BTYPE *) A->x ;
    const GB_ATYPE *restrict Bx = (GB_ATYPE *) B->x ;
    #else
    const GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ;
    const GB_BTYPE *restrict Bx = (GB_BTYPE *) B->x ;
    #endif
    GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ;
    #endif

    const int64_t *restrict Cp = C->p ;
    int64_t *restrict Ci = C->i ;

    //--------------------------------------------------------------------------
    // C=A.*B or C<#M>=A.*B
    //--------------------------------------------------------------------------

    if (M == NULL)
    {

        //----------------------------------------------------------------------
        // C = A.*B
        //----------------------------------------------------------------------

        if (GB_IS_BITMAP (B))
        {

            //------------------------------------------------------------------
            // C=A.*B where A is sparse/hyper and B is bitmap
            //------------------------------------------------------------------

            // Some entries of A may have no counterpart in B, so C can be
            // strictly smaller than A: pC tracks the output position
            // independently of pA, via Cp_kfirst / Cp computed by the caller.

            int tid ;
            #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
            for (tid = 0 ; tid < A_ntasks ; tid++)
            {
                int64_t kfirst = kfirst_Aslice [tid] ;
                int64_t klast  = klast_Aslice  [tid] ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // j is the column index of the k-th vector of A
                    int64_t j = GBH (Ah, k) ;
                    // B is bitmap: entry (i,j) of B lives at position j*vlen+i
                    int64_t pB_start = j * vlen ;
                    int64_t pA, pA_end, pC ;
                    GB_get_pA_and_pC (&pA, &pA_end, &pC, tid, k, kfirst,
                        klast, pstart_Aslice, Cp_kfirst, Cp, vlen, Ap, vlen) ;
                    for ( ; pA < pA_end ; pA++)
                    {
                        int64_t i = Ai [pA] ;
                        int64_t pB = pB_start + i ;
                        // skip if B(i,j) is not present in the bitmap
                        if (!Bb [pB]) continue ;
                        // C (i,j) = A (i,j) .* B (i,j)
                        Ci [pC] = i ;
                        #ifndef GB_ISO_EMULT
                        GB_GETA (aij, Ax, pA, A_iso) ;
                        GB_GETB (bij, Bx, pB, B_iso) ;
                        #if GB_FLIPPED
                        GB_BINOP (GB_CX (pC), bij, aij, i, j) ;
                        #else
                        GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
                        #endif
                        #endif
                        pC++ ;
                    }
                }
            }

        }
        else
        {

            //------------------------------------------------------------------
            // C=A.*B where A is sparse/hyper and B is full
            //------------------------------------------------------------------

            // B is full, so every entry of A has a counterpart in B and the
            // pattern of C is exactly that of A; pA doubles as the position
            // in C, and Ci was already copied from Ai by the caller.

            int tid ;
            #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
            for (tid = 0 ; tid < A_ntasks ; tid++)
            {
                int64_t kfirst = kfirst_Aslice [tid] ;
                int64_t klast  = klast_Aslice  [tid] ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    int64_t j = GBH (Ah, k) ;
                    int64_t pB_start = j * vlen ;
                    int64_t pA, pA_end ;
                    GB_get_pA (&pA, &pA_end, tid, k, kfirst,
                        klast, pstart_Aslice, Ap, vlen) ;
                    for ( ; pA < pA_end ; pA++)
                    {
                        // C (i,j) = A (i,j) .* B (i,j)
                        int64_t i = Ai [pA] ;
                        int64_t pB = pB_start + i ;
                        // Ci [pA] = i ; already defined
                        #ifndef GB_ISO_EMULT
                        GB_GETA (aij, Ax, pA, A_iso) ;
                        GB_GETB (bij, Bx, pB, B_iso) ;
                        #if GB_FLIPPED
                        GB_BINOP (GB_CX (pA), bij, aij, i, j) ;
                        #else
                        GB_BINOP (GB_CX (pA), aij, bij, i, j) ;
                        #endif
                        #endif
                    }
                }
            }
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // C<#M>=A.*B where A is sparse/hyper, M and B are bitmap/full
        //----------------------------------------------------------------------

        // Mx is NULL for a structural mask (the value of M(i,j) is ignored
        // and only its presence matters); otherwise GB_mcast reads the mask
        // value.  Mask_comp selects the complemented mask via the XOR below.

        const int8_t  *restrict Mb = M->b ;
        const GB_void *restrict Mx = (Mask_struct) ? NULL : ((GB_void *) M->x) ;
        const size_t msize = M->type->size ;

        int tid ;
        #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < A_ntasks ; tid++)
        {
            int64_t kfirst = kfirst_Aslice [tid] ;
            int64_t klast  = klast_Aslice  [tid] ;
            for (int64_t k = kfirst ; k <= klast ; k++)
            {
                int64_t j = GBH (Ah, k) ;
                int64_t pB_start = j * vlen ;
                int64_t pA, pA_end, pC ;
                GB_get_pA_and_pC (&pA, &pA_end, &pC, tid, k, kfirst,
                    klast, pstart_Aslice, Cp_kfirst, Cp, vlen, Ap, vlen) ;
                for ( ; pA < pA_end ; pA++)
                {
                    int64_t i = Ai [pA] ;
                    int64_t pB = pB_start + i ;
                    // GBB handles both bitmap and full B (always true if full)
                    if (!GBB (Bb, pB)) continue ;
                    // test the mask, flipping it if Mask_comp is true
                    bool mij = GBB (Mb, pB) && GB_mcast (Mx, pB, msize) ;
                    mij = mij ^ Mask_comp ;
                    if (!mij) continue ;
                    // C (i,j) = A (i,j) .* B (i,j)
                    Ci [pC] = i ;
                    #ifndef GB_ISO_EMULT
                    GB_GETA (aij, Ax, pA, A_iso) ;
                    GB_GETB (bij, Bx, pB, B_iso) ;
                    #if GB_FLIPPED
                    GB_BINOP (GB_CX (pC), bij, aij, i, j) ;
                    #else
                    GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
                    #endif
                    #endif
                    pC++ ;
                }
            }
        }
    }
}
pcpdlpverifydsaca.c
/******************************************************************************* * Copyright 2005-2019 Intel Corporation * All Rights Reserved. * * If this software was obtained under the Intel Simplified Software License, * the following terms apply: * * The source code, information and material ("Material") contained herein is * owned by Intel Corporation or its suppliers or licensors, and title to such * Material remains with Intel Corporation or its suppliers or licensors. The * Material contains proprietary information of Intel or its suppliers and * licensors. The Material is protected by worldwide copyright laws and treaty * provisions. No part of the Material may be used, copied, reproduced, * modified, published, uploaded, posted, transmitted, distributed or disclosed * in any way without Intel's prior express written permission. No license under * any patent, copyright or other intellectual property rights in the Material * is granted to or conferred upon you, either expressly, by implication, * inducement, estoppel or otherwise. Any license under such intellectual * property rights must be express and approved by Intel in writing. * * Unless otherwise agreed by Intel in writing, you may not remove or alter this * notice or any other notice embedded in Materials by Intel or Intel's * suppliers or licensors in any way. * * * If this software was obtained under the Apache License, Version 2.0 (the * "License"), the following terms apply: * * You may not use this file except in compliance with the License. You may * obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 * * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. 
*******************************************************************************/

/*
//
//  Purpose:
//     Cryptography Primitive.
//     DL over Prime Finite Field (Verify, DSA version)
//
//  Contents:
//     ippsDLPVerifyDSA()
//
//
*/

#include "owndefs.h"
#include "owncp.h"
#include "pcpdlp.h"

/*F*
// Name: ippsDLPVerifyDSA
//
// Purpose: Verify Signature (DSA version)
//
// Returns:  Reason:
//    ippStsNullPtrErr              NULL == pDL
//                                  NULL == pMsgDigest
//                                  NULL == pSignR
//                                  NULL == pSignS
//                                  NULL == pResult
//
//    ippStsContextMatchErr         illegal pDL->idCtx
//                                  illegal pMsgDigest->idCtx
//                                  illegal pSignR->idCtx
//                                  illegal pSignS->idCtx
//
//    ippStsIncompleteContextErr
//                                  incomplete context
//
//    ippStsMessageErr              MsgDigest >= R
//                                  MsgDigest <  0
//
//    ippStsNoErr                   no errors
//
// Parameters:
//    pMsgDigest     pointer to the message representative to be signed
//    pSignR,pSignS  pointer to the signature
//    pResult        pointer to the result: IppSignIsValid/IppSignIsInvalid
//    pDSA           pointer to the DL context
//
// Primitive sequence call:
//    1) set up domain parameters
//    2) set up (signatory's) public key
//
// NOTE(review): an out-of-range signature component is reported by setting
// *pResult = ippDLInvalidSignature and returning ippStsNoErr (a successful
// verification with a negative verdict), not by an error status.
*F*/
#if !defined(_OPENMP)
IPPFUN(IppStatus, ippsDLPVerifyDSA,(const IppsBigNumState* pMsgDigest,
                                    const IppsBigNumState* pSignR, const IppsBigNumState* pSignS,
                                    IppDLResult* pResult,
                                    IppsDLPState* pDL))
{
   /* test context*/
   IPP_BAD_PTR2_RET(pDL,pResult);
   pDL = (IppsDLPState*)( IPP_ALIGNED_PTR(pDL, DLP_ALIGNMENT) );
   IPP_BADARG_RET(!DLP_VALID_ID(pDL), ippStsContextMatchErr);

   /* test operation flag */
   IPP_BADARG_RET(!DLP_COMPLETE(pDL), ippStsIncompleteContextErr);

   /* test message representative */
   IPP_BAD_PTR1_RET(pMsgDigest);
   pMsgDigest = (IppsBigNumState*)( IPP_ALIGNED_PTR(pMsgDigest, BN_ALIGNMENT) );
   IPP_BADARG_RET(!BN_VALID_ID(pMsgDigest), ippStsContextMatchErr);
   IPP_BADARG_RET(BN_NEGATIVE(pMsgDigest), ippStsMessageErr);
   /* make sure msg <order */
   IPP_BADARG_RET(0<=cpCmp_BNU(BN_NUMBER(pMsgDigest), BN_SIZE(pMsgDigest), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL))), ippStsMessageErr);

   /* test signature */
   IPP_BAD_PTR2_RET(pSignR,pSignS);
   pSignR = (IppsBigNumState*)( IPP_ALIGNED_PTR(pSignR, BN_ALIGNMENT) );
   pSignS = (IppsBigNumState*)( IPP_ALIGNED_PTR(pSignS, BN_ALIGNMENT) );
   IPP_BADARG_RET(!BN_VALID_ID(pSignR), ippStsContextMatchErr);
   IPP_BADARG_RET(!BN_VALID_ID(pSignS), ippStsContextMatchErr);

   /* test signature range: DSA requires 1 <= SignR < R and 1 <= SignS < R;
      anything outside that range is an invalid (but well-formed) signature */
   if(0<cpBN_cmp(cpBN_OneRef(), pSignR)||
      0<=cpCmp_BNU(BN_NUMBER(pSignR),BN_SIZE(pSignR), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL)))) {
      *pResult = ippDLInvalidSignature;
      return ippStsNoErr;
   }
   if(0<cpBN_cmp(cpBN_OneRef(), pSignS)||
      0<=cpCmp_BNU(BN_NUMBER(pSignS),BN_SIZE(pSignS), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL)))) {
      *pResult = ippDLInvalidSignature;
      return ippStsNoErr;
   }

   {
      /* allocate BN resources from the context's internal big-number pool */
      BigNumNode* pList = DLP_BNCTX(pDL);
      IppsBigNumState* pW     = cpBigNumListGet(&pList);
      IppsBigNumState* pU1    = cpBigNumListGet(&pList);
      IppsBigNumState* pU2    = cpBigNumListGet(&pList);
      IppsBigNumState* pOrder = cpBigNumListGet(&pList);
      ippsSet_BN(ippBigNumPOS, BITS2WORD32_SIZE(DLP_BITSIZER(pDL)), (Ipp32u*)DLP_R(pDL), pOrder);

      /* W = 1/SignS (mod R) */
      ippsModInv_BN((IppsBigNumState*)pSignS, pOrder, pW);
      cpMontEnc_BN(pW, pW, DLP_MONTR(pDL));

      /* reduct pMsgDigest if necessary */
      if(0 < cpBN_cmp(pMsgDigest, pOrder))
         ippsMod_BN((IppsBigNumState*)pMsgDigest, pOrder, pU1);
      else
         cpBN_copy(pU1, pMsgDigest);

      /* U1 = (MsgDigest*W) (mod R) */
      cpMontMul_BN(pU1, pW, pU1, DLP_MONTR(pDL));

      /* U2 = (SignR*W) (mod R) */
      cpMontMul_BN(pU2, pSignR, pW, DLP_MONTR(pDL));

      /*
      // V = ((G^U1)*(Y^U2) (mod P)) (mod R)
      //
      // computed with a single fast multi-exponentiation instead of two
      // separate modular exponentiations
      */
      /* precompute multi-exp table {1, G, Y, G*Y} */
      {
         cpSize pSize = BITS_BNU_CHUNK( DLP_BITSIZEP(pDL) );
         BNU_CHUNK_T* pX1 = BN_NUMBER(DLP_GENC(pDL));
         BNU_CHUNK_T* pX2 = BN_NUMBER(DLP_YENC(pDL));
         const BNU_CHUNK_T* ppX[2];
         ppX[0] = pX1;
         ppX[1] = pX2;
         /* zero-pad both bases up to the full modulus width */
         ZEXPAND_BNU(pX1, BN_SIZE(DLP_GENC(pDL)), pSize);
         ZEXPAND_BNU(pX2, BN_SIZE(DLP_YENC(pDL)), pSize);
         cpMontMultiExpInitArray(DLP_METBL(pDL),
                                 ppX, pSize*BITSIZE(BNU_CHUNK_T), 2,
                                 DLP_MONTP0(pDL));
      }

      /* W = ((G^U1)*(Y^U2) (mod P) */
      {
         cpSize sizeE1 = BN_SIZE(pU1);
         cpSize sizeE2 = BN_SIZE(pU2);
         cpSize sizeE = IPP_MAX(sizeE1, sizeE2);
         BNU_CHUNK_T* pE1 = BN_NUMBER(pU1);
         BNU_CHUNK_T* pE2 = BN_NUMBER(pU2);
         const Ipp8u* ppE[2];
         ppE[0] = (Ipp8u*)pE1;
         ppE[1] = (Ipp8u*)pE2;
         /* zero-pad both exponents to the common width */
         ZEXPAND_BNU(pE1, sizeE1, sizeE);
         ZEXPAND_BNU(pE2, sizeE2, sizeE);
         cpFastMontMultiExp(BN_NUMBER(pW), DLP_METBL(pDL),
                            ppE, sizeE*BITSIZE(BNU_CHUNK_T), 2,
                            DLP_MONTP0(pDL));
         BN_SIZE(pW) = BITS_BNU_CHUNK( DLP_BITSIZEP(pDL) );
         BN_SIGN(pW) = ippBigNumPOS;
      }

      /* leave Montgomery domain, then reduce modulo the order R */
      cpMontDec_BN(pW, pW, DLP_MONTP0(pDL));
      BN_SIZE(pW) = cpMod_BNU(BN_NUMBER(pW), BN_SIZE(pW), BN_NUMBER(pOrder), BN_SIZE(pOrder));

      /* result = W~R : the signature is valid iff V equals SignR */
      *pResult = 0==cpBN_cmp(pW, pSignR)? ippDLValid : ippDLInvalidSignature;
      return ippStsNoErr;
   }
}
//#endif
#else
/* OpenMP build: same primitive, but the two modular exponentiations
   (G^U1 mod P and Y^U2 mod P) are executed as parallel sections.
   NOTE(review): this variant rejects the message with cpBN_tst()<0 while the
   serial variant uses BN_NEGATIVE(); presumably equivalent — confirm. */
IPPFUN(IppStatus, ippsDLPVerifyDSA,(const IppsBigNumState* pMsgDigest,
                                    const IppsBigNumState* pSignR, const IppsBigNumState* pSignS,
                                    IppDLResult* pResult,
                                    IppsDLPState* pDL))
{
   /* test context*/
   IPP_BAD_PTR2_RET(pDL,pResult);
   pDL = (IppsDLPState*)( IPP_ALIGNED_PTR(pDL, DLP_ALIGNMENT) );
   IPP_BADARG_RET(!DLP_VALID_ID(pDL), ippStsContextMatchErr);

   /* test operation flag */
   IPP_BADARG_RET(!DLP_COMPLETE(pDL), ippStsIncompleteContextErr);

   /* test message representative */
   IPP_BAD_PTR1_RET(pMsgDigest);
   pMsgDigest = (IppsBigNumState*)( IPP_ALIGNED_PTR(pMsgDigest, BN_ALIGNMENT) );
   IPP_BADARG_RET(!BN_VALID_ID(pMsgDigest), ippStsContextMatchErr);
   IPP_BADARG_RET((0>cpBN_tst(pMsgDigest)), ippStsMessageErr);
   /* make sure msg <order */
   IPP_BADARG_RET(0<=cpCmp_BNU(BN_NUMBER(pMsgDigest), BN_SIZE(pMsgDigest), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL))), ippStsMessageErr);

   /* test signature */
   IPP_BAD_PTR2_RET(pSignR,pSignS);
   pSignR = (IppsBigNumState*)( IPP_ALIGNED_PTR(pSignR, BN_ALIGNMENT) );
   pSignS = (IppsBigNumState*)( IPP_ALIGNED_PTR(pSignS, BN_ALIGNMENT) );
   IPP_BADARG_RET(!BN_VALID_ID(pSignR), ippStsContextMatchErr);
   IPP_BADARG_RET(!BN_VALID_ID(pSignS), ippStsContextMatchErr);

   /* test signature range: DSA requires 1 <= SignR < R and 1 <= SignS < R */
   if(0<cpBN_cmp(cpBN_OneRef(), pSignR)||
      0<=cpCmp_BNU(BN_NUMBER(pSignR),BN_SIZE(pSignR), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL)))) {
      *pResult = ippDLInvalidSignature;
      return ippStsNoErr;
   }
   if(0<cpBN_cmp(cpBN_OneRef(), pSignS)||
      0<=cpCmp_BNU(BN_NUMBER(pSignS),BN_SIZE(pSignS), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL)))) {
      *pResult = ippDLInvalidSignature;
      return ippStsNoErr;
   }

   {
      /* allocate BN resources from the context's internal big-number pool */
      BigNumNode* pList = DLP_BNCTX(pDL);
      IppsBigNumState* pV     = cpBigNumListGet(&pList);
      IppsBigNumState* pW     = cpBigNumListGet(&pList);
      IppsBigNumState* pU1    = cpBigNumListGet(&pList);
      IppsBigNumState* pU2    = cpBigNumListGet(&pList);
      IppsBigNumState* pOrder = cpBigNumListGet(&pList);
      ippsSet_BN(ippBigNumPOS, BITS2WORD32_SIZE(DLP_BITSIZER(pDL)), (Ipp32u*)DLP_R(pDL), pOrder);

      //int maxNumThreads = IPP_MIN(IPPCP_GET_NUM_THREADS(), 2);

      /* W = 1/SignS (mod R) */
      ippsModInv_BN((IppsBigNumState*)pSignS, pOrder, pW);
      cpMontEnc_BN(pW, pW, DLP_MONTR(pDL));

      /* reduct pMsgDigest if necessary */
      if(0 < cpBN_cmp(pMsgDigest, pOrder))
         ippsMod_BN((IppsBigNumState*)pMsgDigest, pOrder, pU1);
      else
         cpBN_copy(pU1, pMsgDigest);

      /* U1 = (MsgDigest*W) (mod R) */
      cpMontMul_BN(pU1, pW, pU1, DLP_MONTR(pDL));

      /* U2 = (SignR*W) (mod R) */
      cpMontMul_BN(pU2, pSignR, pW, DLP_MONTR(pDL));

      /* V = ((G^U1)*(Y^U2) (mod P)) (mod R) */
      #pragma omp parallel sections IPPCP_OMP_LIMIT_MAX_NUM_THREADS(2)
      {
         /* W = (G^U1) (mod P) */
         #pragma omp section
         {
            #if !defined(_USE_WINDOW_EXP_)
            //cpSafeMontExp_Binary(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL));
            cpMontExpBin_BN(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL) );
            #else
            /* fall back to the binary ladder when the window size degenerates
               to 1, otherwise use the windowed exponentiation */
            if((DLP_EXPMETHOD(pDL)==BINARY) || (1==cpMontExp_WinSize(BITSIZE_BNU(BN_NUMBER(pU1), BN_SIZE(pU1)))))
               //cpSafeMontExp_Binary(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL));
               cpMontExpBin_BN(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL) );
            else
               //cpSafeMontExp_Window(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL), DLP_BNUCTX0(pDL));
               cpMontExpWin_BN(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL), DLP_BNUCTX0(pDL));
            #endif
         }

         /* V = (Y^U2) (mod P) */
         #pragma omp section
         {
            #if !defined(_USE_WINDOW_EXP_)
            //cpSafeMontExp_Binary(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL));
            cpMontExpBin_BN(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL) );
            #else
            if((DLP_EXPMETHOD(pDL)==BINARY) || (1==cpMontExp_WinSize(BITSIZE_BNU(BN_NUMBER(pU2), BN_SIZE(pU2)))))
               //cpSafeMontExp_Binary(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL));
               cpMontExpBin_BN(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL) );
            else
               //cpSafeMontExp_Window(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL), DLP_BNUCTX1(pDL));
               cpMontExpWin_BN(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL), DLP_BNUCTX1(pDL));
            #endif
         }
      }

      /* combine the two partial results, leave Montgomery domain,
         then reduce modulo the order R */
      cpMontMul_BN(pV, pW, pV, DLP_MONTP0(pDL));
      cpMontDec_BN(pV, pV, DLP_MONTP0(pDL));
      BN_SIZE(pV) = cpMod_BNU(BN_NUMBER(pV), BN_SIZE(pV), BN_NUMBER(pOrder), BN_SIZE(pOrder));

      /* result = V~R : the signature is valid iff V equals SignR */
      *pResult = 0==cpBN_cmp(pV, pSignR)? ippDLValid : ippDLInvalidSignature;
      return ippStsNoErr;
   }
}
#endif /* _OPENMP */
BRKGA.h
/* * BRKGA.h * * This class encapsulates a Biased Random-key Genetic Algorithm (for minimization problems) with K * independent Populations stored in two vectors of Population, current and previous. It supports * multi-threading via OpenMP, and implements the following key methods: * * - BRKGA() constructor: initializes the populations with parameters described below. * - evolve() operator: evolve each Population following the BRKGA methodology. This method * supports OpenMP to evolve up to K independent Populations in parallel. * Please note that double Decoder::decode(...) MUST be thread-safe. * * Required hyperparameters: * - n: number of genes in each chromosome * - p: number of elements in each population * - pe: pct of elite items into each population * - pm: pct of mutants introduced at each generation into the population * - rhoe: probability that an offspring inherits the allele of its elite parent * * Optional parameters: * - K: number of independent Populations * - MAX_THREADS: number of threads to perform parallel decoding -- WARNING: Decoder::decode() MUST * be thread-safe! * * Required templates are: * RNG: random number generator that implements the methods below. * - RNG(unsigned long seed) to initialize a new RNG with 'seed' * - double rand() to return a double precision random deviate in range [0,1) * - unsigned long randInt() to return a >=32-bit unsigned random deviate in range [0,2^32-1) * - unsigned long randInt(N) to return a unsigned random deviate in range [0, N] with N < 2^32 * * Decoder: problem-specific decoder that implements any of the decode methods outlined below. When * compiling and linking BRKGA with -fopenmp (i.e., with multithreading support via * OpenMP), the method must be thread-safe. 
* - double decode(const vector< double >& chromosome) const, if you don't want to change * chromosomes inside the framework, or * - double decode(vector< double >& chromosome) const, if you'd like to update a chromosome * * Created on : Jun 22, 2010 by rtoso * Last update: Sep 28, 2010 by rtoso * Authors: Rodrigo Franco Toso <rtoso@cs.rutgers.edu> */ #ifndef BRKGA_H #define BRKGA_H #include <omp.h> #include <algorithm> #include <exception> #include <stdexcept> #include "Population.h" template< class Decoder, class RNG > class BRKGA { public: /* * Default constructor * Required hyperparameters: * - n: number of genes in each chromosome * - p: number of elements in each population * - pe: pct of elite items into each population * - pm: pct of mutants introduced at each generation into the population * - rhoe: probability that an offspring inherits the allele of its elite parent * * Optional parameters: * - K: number of independent Populations * - MAX_THREADS: number of threads to perform parallel decoding * WARNING: Decoder::decode() MUST be thread-safe; safe if implemented as * + double Decoder::decode(std::vector< double >& chromosome) const */ BRKGA(unsigned n, unsigned p, double pe, double pm, double rhoe, const Decoder& refDecoder, RNG& refRNG, unsigned K = 1, unsigned MAX_THREADS = 1); /** * Destructor */ ~BRKGA(); /** * Resets all populations with brand new keys */ void reset(); /** * Evolve the current populations following the guidelines of BRKGAs * @param generations number of generations (must be even and nonzero) * @param J interval to exchange elite chromosomes (must be even; 0 ==> no synchronization) * @param M number of elite chromosomes to select from each population in order to exchange */ void evolve(unsigned generations = 1); /** * Exchange elite-solutions between the populations * @param M number of elite chromosomes to select from each population */ void exchangeElite(unsigned M); /** * Set individuals to initial population (only one population 
in case of multiple ones). * @param chromosomes a set of individuals described as double vectors * between 0 and 1. */ void setInitialPopulation(const std::vector< std::vector< double > >& chromosomes, int ini_population_size, int population); /** * Returns the current population */ const Population& getPopulation(unsigned k = 0) const; /** * Returns the chromosome with best fitness so far among all populations */ const std::vector< double >& getBestChromosome() const; /** * Returns the best fitness found so far among all populations */ double getBestFitness() const; // Return copies to the internal parameters: unsigned getN() const; unsigned getP() const; unsigned getPe() const; unsigned getPm() const; unsigned getPo() const; double getRhoe() const; unsigned getK() const; unsigned getMAX_THREADS() const; private: // Hyperparameters: const unsigned n; // number of genes in the chromosome const unsigned p; // number of elements in the population const unsigned pe; // number of elite items in the population const unsigned pm; // number of mutants introduced at each generation into the population const double rhoe; // probability that an offspring inherits the allele of its elite parent // Templates: RNG& refRNG; // reference to the random number generator const Decoder& refDecoder; // reference to the problem-dependent Decoder // Parallel populations parameters: const unsigned K; // number of independent parallel populations const unsigned MAX_THREADS; // number of threads for parallel decoding // Data: std::vector< Population* > previous; // previous populations std::vector< Population* > current; // current populations // Local operations: void initialize(const unsigned i); // initialize current population 'i' with random keys void evolution(Population& curr, Population& next); bool isRepeated(const std::vector< double >& chrA, const std::vector< double >& chrB) const; }; template< class Decoder, class RNG > BRKGA< Decoder, RNG >::BRKGA(unsigned _n, unsigned _p, 
double _pe, double _pm, double _rhoe, const Decoder& decoder, RNG& rng, unsigned _K, unsigned MAX) : n(_n), p(_p), pe(unsigned(_pe * p)), pm(unsigned(_pm * p)), rhoe(_rhoe), refRNG(rng), refDecoder(decoder), K(_K), MAX_THREADS(MAX), previous(K, 0), current(K, 0) { // Error check: using std::range_error; if(n == 0) { throw range_error("Chromosome size equals zero."); } if(p == 0) { throw range_error("Population size equals zero."); } if(pe == 0) { throw range_error("Elite-set size equals zero."); } if(pe > p) { throw range_error("Elite-set size greater than population size (pe > p)."); } if(pm > p) { throw range_error("Mutant-set size (pm) greater than population size (p)."); } if(pe + pm > p) { throw range_error("elite + mutant sets greater than population size (p)."); } if(K == 0) { throw range_error("Number of parallel populations cannot be zero."); } // Initialize and decode each chromosome of the current population, then copy to previous: for(unsigned i = 0; i < K; ++i) { // Allocate: current[i] = new Population(n, p); // Initialize: initialize(i); // Then just copy to previous: previous[i] = new Population(*current[i]); } } template< class Decoder, class RNG > BRKGA< Decoder, RNG >::~BRKGA() { for(unsigned i = 0; i < K; ++i) { delete current[i]; delete previous[i]; } } template< class Decoder, class RNG > const Population& BRKGA< Decoder, RNG >::getPopulation(unsigned k) const { return (*current[k]); } template< class Decoder, class RNG > double BRKGA< Decoder, RNG >::getBestFitness() const { double best = current[0]->fitness[0].first; for(unsigned i = 1; i < K; ++i) { if(current[i]->fitness[0].first < best) { best = current[i]->fitness[0].first; } } return best; } template< class Decoder, class RNG > const std::vector< double >& BRKGA< Decoder, RNG >::getBestChromosome() const { unsigned bestK = 0; for(unsigned i = 1; i < K; ++i) { if( current[i]->getBestFitness() < current[bestK]->getBestFitness() ) { bestK = i; } } return current[bestK]->getChromosome(0); 
// The top one :-) } template< class Decoder, class RNG > void BRKGA< Decoder, RNG >::reset() { for(unsigned i = 0; i < K; ++i) { initialize(i); } } template< class Decoder, class RNG > void BRKGA< Decoder, RNG >::evolve(unsigned generations) { if(generations == 0) { throw std::range_error("Cannot evolve for 0 generations."); } for(unsigned i = 0; i < generations; ++i) { for(unsigned j = 0; j < K; ++j) { evolution(*current[j], *previous[j]); // First evolve the population (curr, next) std::swap(current[j], previous[j]); // Update (prev = curr; curr = prev == next) } } } template< class Decoder, class RNG > void BRKGA< Decoder, RNG >::exchangeElite(unsigned M) { if(M == 0 || M >= p) { throw std::range_error("M cannot be zero or >= p."); } for(unsigned i = 0; i < K; ++i) { // Population i will receive some elite members from each Population j below: unsigned dest = p - 1; // Last chromosome of i (will be updated below) for(unsigned j = 0; j < K; ++j) { if(j == i) { continue; } // Copy the M best of Population j into Population i: for(unsigned m = 0; m < M; ++m) { // Copy the m-th best of Population j into the 'dest'-th position of Population i: const std::vector< double >& bestOfJ = current[j]->getChromosome(m); std::copy(bestOfJ.begin(), bestOfJ.end(), current[i]->getChromosome(dest).begin()); current[i]->fitness[dest].first = current[j]->fitness[m].first; --dest; } } } for(int j = 0; j < int(K); ++j) { current[j]->sortFitness(); } } template< class Decoder, class RNG > void BRKGA< Decoder, RNG >::setInitialPopulation(const std::vector< std::vector< double > >& chromosomes, int ini_population_size, int population) { //current[0] = new Population(n, chromosomes.size()); unsigned i = 0; for(std::vector< std::vector< double > >::const_iterator it_chrom = chromosomes.begin(); it_chrom != chromosomes.end() && i < ini_population_size; ++it_chrom, ++i) { if(it_chrom->size() != n) { throw std::runtime_error("Error on setting initial population: number of genes isn't 
equal!"); } std::copy(it_chrom->begin(), it_chrom->end(), current[population]->population[i].begin()); //std::cout << "VOU CALCULAR CUSTO" << std::endl; current[population]->setFitness(i, refDecoder.decode((*current[population])(i)) ); } current[population]->sortFitness(); } template< class Decoder, class RNG > inline void BRKGA< Decoder, RNG >::initialize(const unsigned i) { for(unsigned j = 0; j < p; ++j) { for(unsigned k = 0; k < n; ++k) { (*current[i])(j, k) = refRNG.rand(); } } // Decode: #ifdef _OPENMP #pragma omp parallel for num_threads(MAX_THREADS) #endif for(int j = 0; j < int(p); ++j) { current[i]->setFitness(j, refDecoder.decode((*current[i])(j)) ); } // Sort: current[i]->sortFitness(); } template< class Decoder, class RNG > inline void BRKGA< Decoder, RNG >::evolution(Population& curr, Population& next) { // We now will set every chromosome of 'current', iterating with 'i': unsigned i = 0; // Iterate chromosome by chromosome unsigned j = 0; // Iterate allele by allele // 2. The 'pe' best chromosomes are maintained, so we just copy these into 'current': while(i < pe) { for(j = 0 ; j < n; ++j) { next(i,j) = curr(curr.fitness[i].second, j); } next.fitness[i].first = curr.fitness[i].first; next.fitness[i].second = i; ++i; } // 3. We'll mate 'p - pe - pm' pairs; initially, i = pe, so we need to iterate until i < p - pm: while(i < p - pm) { // Select an elite parent: const unsigned eliteParent = (refRNG.randInt(pe - 1)); // Select a non-elite parent: const unsigned noneliteParent = pe + (refRNG.randInt(p - pe - 1)); // Mate: for(j = 0; j < n; ++j) { const unsigned sourceParent = ((refRNG.rand() < rhoe) ? eliteParent : noneliteParent); next(i, j) = curr(curr.fitness[sourceParent].second, j); //next(i, j) = (refRNG.rand() < rhoe) ? 
curr(curr.fitness[eliteParent].second, j) : // curr(curr.fitness[noneliteParent].second, j); } ++i; } // We'll introduce 'pm' mutants: while(i < p) { for(j = 0; j < n; ++j) { next(i, j) = refRNG.rand(); } ++i; } // Time to compute fitness, in parallel: #ifdef _OPENMP #pragma omp parallel for num_threads(MAX_THREADS) #endif for(int i = int(pe); i < int(p); ++i) { next.setFitness( i, refDecoder.decode(next.population[i]) ); } // Now we must sort 'current' by fitness, since things might have changed: next.sortFitness(); } template< class Decoder, class RNG > unsigned BRKGA<Decoder, RNG>::getN() const { return n; } template< class Decoder, class RNG > unsigned BRKGA<Decoder, RNG>::getP() const { return p; } template< class Decoder, class RNG > unsigned BRKGA<Decoder, RNG>::getPe() const { return pe; } template< class Decoder, class RNG > unsigned BRKGA<Decoder, RNG>::getPm() const { return pm; } template< class Decoder, class RNG > unsigned BRKGA<Decoder, RNG>::getPo() const { return p - pe - pm; } template< class Decoder, class RNG > double BRKGA<Decoder, RNG>::getRhoe() const { return rhoe; } template< class Decoder, class RNG > unsigned BRKGA<Decoder, RNG>::getK() const { return K; } template< class Decoder, class RNG > unsigned BRKGA<Decoder, RNG>::getMAX_THREADS() const { return MAX_THREADS; } #endif
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
flexDiagonalOperator.h
#ifndef flexDiagonalOperator_H
#define flexDiagonalOperator_H

#include <vector>
#include "flexLinearOperator.h"

//! represents a diagonal operator
/*!
	Wraps a vector d and applies the square linear map y = D x with
	D = diag(d).  Because a diagonal matrix equals its transpose, the
	"transposed" flag of the times* methods is ignored.
	Storage (Tdata) is a thrust::device_vector under CUDA and a
	std::vector otherwise.
*/
template <typename T>
class flexDiagonalOperator : public flexLinearOperator<T>
{
#ifdef __CUDACC__
	typedef thrust::device_vector<T> Tdata;
#else
	typedef std::vector<T> Tdata;
#endif

private:
	// entries of the diagonal; element i scales input component i
	Tdata diagonalElements;

public:
	//! initializes the diagonal operator from a host vector
	/*!
		\param aDiagonalElements vector of diagonal Elements
		\param aMinus determines if operator is negated
		\sa isMinus
	*/
	flexDiagonalOperator(std::vector<T> aDiagonalElements, bool aMinus) : flexLinearOperator<T>(static_cast<int>(aDiagonalElements.size()), static_cast<int>(aDiagonalElements.size()), diagonalOp, aMinus)
	{
		this->diagonalElements.resize(aDiagonalElements.size());
#ifdef __CUDACC__
		// host -> device copy
		thrust::copy(aDiagonalElements.begin(), aDiagonalElements.end(), this->diagonalElements.begin());
#else
		this->diagonalElements = aDiagonalElements;
#endif
	}

#ifdef __CUDACC__
	//! initializes the diagonal operator for CUDA versions
	/*!
		\param aDiagonalElements vector of diagonal Elements where Tdata is of type thrust::device_vector<T>
		\param aMinus determines if operator is negated
		\sa isMinus
	*/
	// NOTE(review): base classes are always initialized before members, so
	// listing diagonalElements first here only affects the textual order,
	// not the actual initialization order.
	flexDiagonalOperator(Tdata aDiagonalElements, bool aMinus) : diagonalElements(aDiagonalElements), flexLinearOperator<T>(static_cast<int>(aDiagonalElements.size()), static_cast<int>(aDiagonalElements.size()), diagonalOp, aMinus)
	{
	};
#endif

	//! returns a heap-allocated deep copy; caller takes ownership
	flexDiagonalOperator<T>* copy()
	{
		flexDiagonalOperator<T>* A = new flexDiagonalOperator<T>(this->diagonalElements, this->isMinus);

		return A;
	}

#ifdef __CUDACC__
	//! element-wise GPU functor used by doTimes;
	//! tuple layout is (output, input, diagonal entry)
	struct flexDiagonalOperatorFunctor
	{
		__host__ __device__
		flexDiagonalOperatorFunctor(const mySign _s) : s(_s){}

		template <typename Tuple>
		__host__ __device__
		void operator()(Tuple t)
		{
			switch (this->s)
			{
				case PLUS:
				{
					thrust::get<0>(t) += thrust::get<1>(t) * thrust::get<2>(t);
					break;
				}
				case MINUS:
				{
					thrust::get<0>(t) -= thrust::get<1>(t) * thrust::get<2>(t);
					break;
				}
				case EQUALS:
				{
					thrust::get<0>(t) = thrust::get<1>(t) * thrust::get<2>(t);
					break;
				}
			}
		}

		mySign s;
	};
#endif

	//apply linear operator to vector
	// output = D * input (transposed is irrelevant for a diagonal matrix)
	void times(bool transposed, const Tdata &input, Tdata &output)
	{
		this->doTimes(input,output,EQUALS);
	}

	// output += D * input; the accumulation sign flips when the operator is negated
	void timesPlus(bool transposed, const Tdata &input, Tdata &output)
	{
		if (this->isMinus)
		{
			this->doTimes(input,output, MINUS);
		}
		else
		{
			this->doTimes(input,output, PLUS);
		}
	}

	// output -= D * input; the accumulation sign flips when the operator is negated
	void timesMinus(bool transposed, const Tdata &input, Tdata &output)
	{
		if (this->isMinus)
		{
			this->doTimes(input,output, PLUS);
		}
		else
		{
			this->doTimes(input,output, MINUS);
		}
	}

	// absolute row sums of D: each row holds a single entry, so this is |d_k|.
	// NOTE(review): under CUDA this indexes a device_vector element-by-element
	// from the host, which is slow but functional — confirm it is off the hot path.
	std::vector<T> getAbsRowSum(bool transposed)
	{
		std::vector<T> result(this->getNumRows());

		#pragma omp parallel for
		for (int k = 0; k < this->getNumRows(); ++k)
		{
			result[k] = std::abs(this->diagonalElements[k]);
		}

		return result;
	}

	// largest absolute diagonal entry (the operator's infinity norm)
	T getMaxRowSumAbs(bool transposed)
	{
		Tdata diagonalElementsCopy = this->diagonalElements;

		vectorAbs(diagonalElementsCopy);

		return vectorMax(diagonalElementsCopy);
	}

#ifdef __CUDACC__
	// device-side variant of getAbsRowSum; returns |d| as a device vector
	thrust::device_vector<T> getAbsRowSumCUDA(bool transposed)
	{
		Tdata diagonalElementsCopy = this->diagonalElements;

		vectorAbs(diagonalElementsCopy);

		return diagonalElementsCopy;
	}
#endif

private:
	// CPU path: element-wise multiply with the requested accumulation mode
	void doTimesCPU(const Tdata &input, Tdata &output,const mySign s)
	{
		int numElements = (int)output.size();

		#pragma omp parallel for
		for (int i = 0; i < numElements; ++i)
		{
			switch (s)
			{
				case PLUS:
				{
					output[i] += input[i] * this->diagonalElements[i];
					break;
				}
				case MINUS:
				{
					output[i] -= input[i] * this->diagonalElements[i];
					break;
				}
				case EQUALS:
				{
					output[i] = input[i] * this->diagonalElements[i];
					break;
				}
			}
		}
	}

	// dispatch: thrust zip-iterator kernel under CUDA, OpenMP loop otherwise
	void doTimes(const Tdata &input, Tdata &output,const mySign s)
	{
#ifdef __CUDACC__
		thrust::for_each(
			thrust::make_zip_iterator(thrust::make_tuple(output.begin(), input.begin(), this->diagonalElements.begin())),
			thrust::make_zip_iterator(thrust::make_tuple(output.end(), input.end(), this->diagonalElements.end())),
			flexDiagonalOperatorFunctor(s));
#else
		this->doTimesCPU(input,output,s);
#endif
	}
};

#endif
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 4; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
shared-clause.c
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Demonstrates OpenMP's default(none) clause: every variable referenced in
 * the parallel region must then be given an explicit data-sharing attribute
 * (shared, private, firstprivate, lastprivate, reduction, ...) or the
 * compiler rejects the construct.
 *
 * Fix: the original pragma had its shared() clause commented out, so `a`
 * and `n` carried no data-sharing attribute and the file failed to compile
 * with OpenMP enabled.  The loop index `i` needs no clause: the iteration
 * variable of a parallel-for is predetermined private.
 */
int main()
{
	int i, n = 7;
	int a[n];

	for (i=0; i<n; i++)
		a[i] = i+1;

	/* a: written by all threads -> shared; n: read-only bound -> shared */
	#pragma omp parallel for default(none) shared(a, n)
	for (i=0; i<n; i++)
		a[i] += i;

	printf("Después de parallel for:\n");
	for (i=0; i<n; i++)
		printf("a[%d] = %d\n",i,a[i]);

	return 0;
}
Typing.h
#ifndef INCLUDE_BAYESTYPING_TYPING_H
#define INCLUDE_BAYESTYPING_TYPING_H

#include <vector>
#include <seqan/sequence.h>
#include <omp.h>

#include "options.h"

//! Allele-pair typing from a per-read alignment score matrix.
/*!
	scoreArr is a readlen x reflen matrix: scoreArr[k][i] is the alignment
	score of read k against reference allele i (the matrix is borrowed from
	the caller, not owned).  Typical flow: pickCandidateAllele() to shortlist
	alleles, then bayesTyping() to fill `answers` with the best-scoring
	allele pairs; results can be saved/restored via writeTyping/readTyping.
	Relies on names (vector, sort, greater, ifstream, ...) being visible
	without std:: qualification — presumably via options.h; confirm.
*/
class Typing
{
private:
	int readlen;            // number of reads (rows of scoreArr)
	int reflen;             // number of reference alleles (columns of scoreArr)
	int** scoreArr;         // borrowed score matrix, readlen x reflen
	vector<int> candAllele; // candidate allele indices kept by pickCandidateAllele()

private:
	void calMaxHit();
	//void pickCandidateAllele(double filter_threshold);

public:
	vector<PairAlleles> answers; // best-scoring allele pairs after bayesTyping()
	vector<int> maxHit;          // per-read best score over the candidate alleles

public:
	void bayesTyping(int mnoise);
	int readTyping(CharString inTypingFile);
	int writeTyping(CharString outTypingFile);
	Typing(int **inScoreArr,int inReadlen, int inReflen);
	void pickCandidateAllele(double filter_threshold);
};

// Stores the score matrix by pointer (no copy) and zero-fills maxHit.
Typing::Typing(int **inScoreArr,int inReadlen, int inReflen){
	scoreArr = inScoreArr;
	readlen = inReadlen;
	reflen = inReflen;
	maxHit=vector<int>(readlen,0);
}

// For every read, record its best score over the candidate alleles only.
// Entries stay 0 for reads that hit no candidate.
void Typing::calMaxHit(){ // max scores for each read
	for(unsigned k=0;k<readlen;k++){
		int maxscore=0;
		for(int ci=0;ci<candAllele.size();ci++){
			int i=candAllele[ci];
			if(scoreArr[k][i]>maxscore)
				maxscore=scoreArr[k][i];
		}
		if(maxscore>0){
			maxHit[k] = maxscore;
		}
	}
}

// Scores every unordered candidate-allele pair (i,j): each read contributes
// max(score vs i, score vs j), plus up to `mnoise` of the largest shortfalls
// against the read's overall best hit (noise tolerance).  All pairs reaching
// the running maximum are collected, then filtered to the final maximum.
// \param mnoise number of reads allowed to be treated as noise
void Typing::bayesTyping(int mnoise){
	calMaxHit();
	vector<int> candRead;
	vector<PairAlleles> tmpanswers;
	// only reads that hit at least one candidate allele participate
	for(unsigned k=0;k<readlen;k++){
		if(maxHit[k]>0){
			candRead.push_back(k);
		}
	}

	int maxalign=0;
	//SEQAN_OMP_PRAGMA(parallel for)
	for(unsigned ci=0;ci<candAllele.size()-1;ci++){
		vector<int> difv;
		for(int cj=ci+1;cj<candAllele.size();cj++){
			int sum=0;
			int i = candAllele[ci];
			int j = candAllele[cj];
			difv.clear();
			for(int ck=0;ck<candRead.size();ck++){
				int k=candRead[ck];
				int maxscore=0;
				if(scoreArr[k][i]>scoreArr[k][j]){
					maxscore=scoreArr[k][i];
				}
				else{
					maxscore=scoreArr[k][j];
				}
				// shortfall of this pair vs the read's best candidate hit
				if(maxHit[k]>maxscore)
					difv.push_back(maxHit[k]-maxscore);
				sum+=maxscore;
			}
			// forgive the mnoise largest shortfalls (descending sort)
			sort(difv.begin(),difv.end(),greater<int>());
			for(int k=0;k<mnoise && k<difv.size();k++)
				sum+=difv[k];
			// #pragma omp critical(dataupdate)
			if(sum>=maxalign){ // prepare for the outputs
				maxalign = sum;
				if(mnoise==0)
					tmpanswers.push_back(PairAlleles(i,j,INT_MAX,sum));
				else
					// NOTE(review): difv[mnoise-1] assumes difv holds at least
					// mnoise elements; out-of-range when fewer reads fall short
					// — confirm against callers.
					tmpanswers.push_back(PairAlleles(i,j,difv[mnoise-1],sum));
			}
		}
	}
	// keep only the pairs that reached the final maximum score
	for (vector<PairAlleles>::iterator it = tmpanswers.begin() ; it != tmpanswers.end(); ++it){
		if(it->score==maxalign)
			answers.push_back(*it);
	}
}

// Shortlists alleles: an allele becomes a candidate when it achieves the
// per-read maximum score for more than filter_threshold * readlen reads.
void Typing::pickCandidateAllele(double filter_threshold){
	vector<int> matchReads(reflen,0); // #reads with max score to an allele
	int maxscore;
	for(unsigned k=0;k<readlen;k++){
		maxscore=0;
		for(unsigned i=0;i<reflen;i++){
			if(scoreArr[k][i]>maxscore)
				maxscore=scoreArr[k][i];
		}
		// every allele tied at the maximum gets credit for this read
		for(unsigned i=0;i<reflen;i++){
			if(scoreArr[k][i]==maxscore)
				matchReads[i]+=1;
		}
	}
	for(unsigned i=0;i<reflen;i++){
		if(matchReads[i]>filter_threshold*(double)readlen){
			candAllele.push_back(i);
		}
	}
}

// Writes answers as TSV lines (allele1 allele2 maxDiff score).
// \return 1 on success, 0 if the file could not be opened
int Typing::writeTyping(CharString outTypingFile){
	ofstream ofs (toCString(outTypingFile));
	if (ofs.is_open())
	{
		for(unsigned i=0;i<answers.size();i++){
			ofs << answers[i].allele1 << "\t" << answers[i].allele2 << "\t" << answers[i].maxDiff << "\t" << answers[i].score << endl;
		}
		ofs.close();
		return 1;
	}
	else{
		return 0;
	}
}

// Replaces answers with the pairs read from a file written by writeTyping.
// \return 1 on success, 0 if the file could not be opened
int Typing::readTyping(CharString inTypingFile){
	ifstream ifs;
	ifs.open (toCString(inTypingFile), ifstream::in);
	answers.clear();
	if (ifs.is_open())
	{
		int allele1,allele2,maxDiff,score;
		while ((ifs >> allele1 >> allele2 >> maxDiff >> score).good()) {
			answers.push_back(PairAlleles(allele1,allele2,maxDiff,score));
		}
		ifs.close();
		return 1;
	}
	else
		return 0;
}

#endif
main.c
/** * @file main.c * @brief * * * @author Yu Li, liyu@tjufe.edu.cn * * Created: 2020/9/13 * Revision: none */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include "ops.h" int TestAppCCS (int argc, char *argv[]); int TestAppLAPACK(int argc, char *argv[]); int TestAppHYPRE (int argc, char *argv[]); int TestAppPHG (int argc, char *argv[]); int TestAppSLEPC (int argc, char *argv[]); int TestAppPAS_LAPACK(int argc, char *argv[]); int TestAppPAS_CCS (int argc, char *argv[]); int main(int argc, char *argv[]) { #if OPS_USE_MEMWATCH mwStatistics( 2 ); #endif #if OPS_USE_OMP #pragma omp parallel num_threads(OMP_NUM_THREADS) { int id = omp_get_thread_num(); printf("%d thread\n",id); } #endif //TestAppLAPACK(argc, argv); TestAppCCS(argc, argv); //TestAppHYPRE(argc, argv); //TestAppPHG(argc, argv); //TestAppSLEPC(argc, argv); /* create a PAS matrix to test */ //TestAppPAS_LAPACK(argc, argv); //TestAppPAS_CCS (argc, argv); //TestAppPAS_SLEPC (argc, argv); return 0; }
helloworld.c
#include <stdio.h>
#include <omp.h>

/* Print a greeting tagged with the given OpenMP thread id. */
void printHello(int threadID)
{
	printf("Hello World! %d\n",threadID );
}

/* Spawn a parallel region; every thread greets with its own id. */
int main()
{
#pragma omp parallel
	{
		printHello(omp_get_thread_num());
	}
	return 0;
}
SplineR2RAdoptor.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2016 Jeongnim Kim and QMCPACK developers. // // File developed by: Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign // Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign // Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory // // File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign ////////////////////////////////////////////////////////////////////////////////////// #ifndef QMCPLUSPLUS_EINSPLINE_R2RSOA_ADOPTOR_H #define QMCPLUSPLUS_EINSPLINE_R2RSOA_ADOPTOR_H #include <OhmmsSoA/Container.h> #include <spline2/MultiBspline.hpp> #include <spline2/MultiBsplineEval.hpp> #include "QMCWaveFunctions/BsplineFactory/SplineAdoptorBase.h" namespace qmcplusplus { /** adoptor class to match ST real spline with TT real SPOs * @tparam ST precision of spline * @tparam TT precision of SPOs * @tparam D dimension * * Requires temporage storage and multiplication of the sign of the real part of the phase * Internal storage ST type arrays are aligned and padded. 
*/ template<typename ST, typename TT> struct SplineR2RSoA: public SplineAdoptorBase<ST,3> { static const int D=3; bool IsGamma; using BaseType=SplineAdoptorBase<ST,3>; using SplineType=typename bspline_traits<ST,3>::SplineType; using BCType=typename bspline_traits<ST,3>::BCType; using PointType=typename BaseType::PointType; using SingleSplineType=typename BaseType::SingleSplineType; using vContainer_type=Vector<ST,aligned_allocator<ST> >; using gContainer_type=VectorSoaContainer<ST,3>; using hContainer_type=VectorSoaContainer<ST,6>; using ghContainer_type=VectorSoaContainer<ST,10>; using BaseType::first_spo; using BaseType::last_spo; using SplineAdoptorBase<ST,D>::HalfG; using BaseType::GGt; using BaseType::PrimLattice; using BaseType::kPoints; using BaseType::offset; ///number of points of the original grid int BaseN[3]; ///offset of the original grid, always 0 int BaseOffset[3]; ///multi bspline set MultiBspline<ST>* SplineInst; ///expose the pointer to reuse the reader and only assigned with create_spline ///also used as identifier of shallow copy SplineType* MultiSpline; vContainer_type myV; vContainer_type myL; gContainer_type myG; hContainer_type myH; ghContainer_type mygH; SplineR2RSoA(): BaseType(), SplineInst(nullptr), MultiSpline(nullptr) { this->is_complex=false; this->is_soa_ready=true; this->AdoptorName="SplineR2RSoAAdoptor"; this->KeyWord="SplineR2RSoA"; } SplineR2RSoA(const SplineR2RSoA& a): SplineAdoptorBase<ST,3>(a),SplineInst(a.SplineInst),MultiSpline(nullptr) { const size_t n=a.myV.size(); myV.resize(n); myG.resize(n); myL.resize(n); myH.resize(n); mygH.resize(n); } ~SplineR2RSoA() { if(MultiSpline != nullptr) delete SplineInst; } inline void resizeStorage(size_t n, size_t nvals) { BaseType::init_base(n); const size_t npad=getAlignedSize<ST>(n); myV.resize(npad); myG.resize(npad); myL.resize(npad); myH.resize(npad); mygH.resize(npad); IsGamma=( (HalfG[0]==0) && (HalfG[1]==0) && (HalfG[2]==0)); } void bcast_tables(Communicate* comm) { 
chunked_bcast(comm, MultiSpline); } void gather_tables(Communicate* comm) { if(comm->size()==1) return; const int Nbands = kPoints.size(); const int Nbandgroups = comm->size(); offset.resize(Nbandgroups+1,0); FairDivideLow(Nbands,Nbandgroups,offset); gatherv(comm, MultiSpline, MultiSpline->z_stride, offset); } template<typename GT, typename BCT> void create_spline(GT& xyz_g, BCT& xyz_bc) { GGt=dot(transpose(PrimLattice.G),PrimLattice.G); SplineInst=new MultiBspline<ST>(); SplineInst->create(xyz_g,xyz_bc,myV.size()); MultiSpline=SplineInst->spline_m; for(size_t i=0; i<D; ++i) { BaseOffset[i]=0; BaseN[i]=xyz_g[i].num+3; } qmc_common.memory_allocated += SplineInst->sizeInByte(); } inline void flush_zero() { SplineInst->flush_zero(); } inline void set_spline(SingleSplineType* spline_r, SingleSplineType* spline_i, int twist, int ispline, int level) { SplineInst->copy_spline(spline_r, ispline, BaseOffset, BaseN); } void set_spline(ST* restrict psi_r, ST* restrict psi_i, int twist, int ispline, int level) { Vector<ST> v_r(psi_r,0); SplineInst->set(ispline, v_r); } inline void set_spline_domain(SingleSplineType* spline_r, SingleSplineType* spline_i, int twist, int ispline, const int* offset_l, const int* mesh_l) { } bool read_splines(hdf_archive& h5f) { std::ostringstream o; o<<"spline_" << SplineAdoptorBase<ST,D>::MyIndex; einspline_engine<SplineType> bigtable(SplineInst->spline_m); return h5f.read(bigtable,o.str().c_str());//"spline_0"); } bool write_splines(hdf_archive& h5f) { std::ostringstream o; o<<"spline_" << SplineAdoptorBase<ST,D>::MyIndex; einspline_engine<SplineType> bigtable(SplineInst->spline_m); return h5f.write(bigtable,o.str().c_str());//"spline_0"); } /** convert position in PrimLattice unit and return sign */ inline int convertPos(const PointType& r, PointType& ru) { ru=PrimLattice.toUnit(r); int bc_sign=0; for(int i=0; i<D; i++) if( -std::numeric_limits<ST>::epsilon() < ru[i] && ru[i] < 0 ) ru[i] = ST(0.0); else { ST img = std::floor(ru[i]); ru[i] -= 
img; bc_sign += HalfG[i] * (int)img; } return bc_sign; } template<typename VV> inline void assign_v(int bc_sign, const vContainer_type& myV, VV& psi, int first = 0, int last = -1) const { // protect last last = last<0 ? kPoints.size() : (last>kPoints.size() ? kPoints.size() : last); const ST signed_one = (bc_sign &1)? -1:1; #pragma omp simd for(size_t j=first; j<last; ++j) psi[first_spo+j]=signed_one*myV[j]; } template<typename VV> inline void evaluate_v(const ParticleSet& P, const int iat, VV& psi) { const PointType& r=P.activeR(iat); PointType ru; int bc_sign=convertPos(r,ru); #pragma omp parallel { int first, last; FairDivideAligned(myV.size(), getAlignment<ST>(), omp_get_num_threads(), omp_get_thread_num(), first, last); spline2::evaluate3d(SplineInst->spline_m,ru,myV,first,last); assign_v(bc_sign,myV,psi,first,last); } } template<typename VM, typename VAV> inline void evaluateValues(const VirtualParticleSet& VP, VM& psiM, VAV& SPOMem) { #pragma omp parallel { int first, last; FairDivideAligned(myV.size(), getAlignment<ST>(), omp_get_num_threads(), omp_get_thread_num(), first, last); const size_t m=psiM.cols(); for(int iat=0; iat<VP.getTotalNum(); ++iat) { const PointType& r=VP.activeR(iat); PointType ru; int bc_sign=convertPos(r,ru); Vector<TT> psi(psiM[iat],m); spline2::evaluate3d(SplineInst->spline_m,ru,myV,first,last); assign_v(bc_sign,myV,psi,first,last); } } } inline size_t estimateMemory(const int nP) { return 0; } template<typename VV, typename GV> inline void assign_vgl(int bc_sign, VV& psi, GV& dpsi, VV& d2psi, int first = 0, int last = -1) const { // protect last last = last<0 ? kPoints.size() : (last>kPoints.size() ? kPoints.size() : last); const ST signed_one = (bc_sign &1)? 
-1:1; const ST g00=PrimLattice.G(0), g01=PrimLattice.G(1), g02=PrimLattice.G(2), g10=PrimLattice.G(3), g11=PrimLattice.G(4), g12=PrimLattice.G(5), g20=PrimLattice.G(6), g21=PrimLattice.G(7), g22=PrimLattice.G(8); const ST symGG[6]={GGt[0],GGt[1]+GGt[3],GGt[2]+GGt[6],GGt[4],GGt[5]+GGt[7],GGt[8]}; const ST* restrict g0=myG.data(0); const ST* restrict g1=myG.data(1); const ST* restrict g2=myG.data(2); const ST* restrict h00=myH.data(0); const ST* restrict h01=myH.data(1); const ST* restrict h02=myH.data(2); const ST* restrict h11=myH.data(3); const ST* restrict h12=myH.data(4); const ST* restrict h22=myH.data(5); #pragma omp simd for(size_t j=first; j<last; ++j) { const size_t psiIndex=first_spo+j; psi[psiIndex]=signed_one*myV[j]; dpsi[psiIndex][0]=signed_one*(g00*g0[j]+g01*g1[j]+g02*g2[j]); dpsi[psiIndex][1]=signed_one*(g10*g0[j]+g11*g1[j]+g12*g2[j]); dpsi[psiIndex][2]=signed_one*(g20*g0[j]+g21*g1[j]+g22*g2[j]); d2psi[psiIndex]=signed_one*SymTrace(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],symGG); } } /** assign_vgl_from_l can be used when myL is precomputed and myV,myG,myL in cartesian */ template<typename VV, typename GV> inline void assign_vgl_from_l(int bc_sign, VV& psi, GV& dpsi, VV& d2psi) { const ST signed_one = (bc_sign &1)? 
-1:1; const ST* restrict g0=myG.data(0); const ST* restrict g1=myG.data(1); const ST* restrict g2=myG.data(2); #pragma omp simd for(int psiIndex=first_spo; psiIndex<last_spo; ++psiIndex) { const size_t j=psiIndex-first_spo; psi[psiIndex]=signed_one*myV[j]; dpsi[psiIndex][0]=signed_one*g0[j]; dpsi[psiIndex][1]=signed_one*g1[j]; dpsi[psiIndex][2]=signed_one*g2[j]; d2psi[psiIndex]=signed_one*myL[j]; } } template<typename VV, typename GV> inline void evaluate_vgl(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, VV& d2psi) { const PointType& r=P.activeR(iat); PointType ru; int bc_sign=convertPos(r,ru); #pragma omp parallel { int first, last; FairDivideAligned(myV.size(), getAlignment<ST>(), omp_get_num_threads(), omp_get_thread_num(), first, last); spline2::evaluate3d_vgh(SplineInst->spline_m,ru,myV,myG,myH,first,last); assign_vgl(bc_sign,psi,dpsi,d2psi,first,last); } } template<typename VV, typename GV, typename GGV> void assign_vgh(int bc_sign, VV& psi, GV& dpsi, GGV& grad_grad_psi, int first = 0, int last = -1) const { // protect last last = last<0 ? kPoints.size() : (last>kPoints.size() ? kPoints.size() : last); const ST signed_one = (bc_sign &1)? 
-1:1; const ST g00=PrimLattice.G(0), g01=PrimLattice.G(1), g02=PrimLattice.G(2), g10=PrimLattice.G(3), g11=PrimLattice.G(4), g12=PrimLattice.G(5), g20=PrimLattice.G(6), g21=PrimLattice.G(7), g22=PrimLattice.G(8); const ST* restrict g0=myG.data(0); const ST* restrict g1=myG.data(1); const ST* restrict g2=myG.data(2); const ST* restrict h00=myH.data(0); const ST* restrict h01=myH.data(1); const ST* restrict h02=myH.data(2); const ST* restrict h11=myH.data(3); const ST* restrict h12=myH.data(4); const ST* restrict h22=myH.data(5); #pragma omp simd for(size_t j=first; j<last; ++j) { //dot(PrimLattice.G,myG[j]) const ST dX_r = g00*g0[j]+g01*g1[j]+g02*g2[j]; const ST dY_r = g10*g0[j]+g11*g1[j]+g12*g2[j]; const ST dZ_r = g20*g0[j]+g21*g1[j]+g22*g2[j]; const size_t psiIndex=j+first_spo; psi[psiIndex] =signed_one*myV[j]; dpsi[psiIndex][0]=signed_one*dX_r; dpsi[psiIndex][1]=signed_one*dY_r; dpsi[psiIndex][2]=signed_one*dZ_r; const ST h_xx_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g00,g01,g02,g00,g01,g02); const ST h_xy_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g00,g01,g02,g10,g11,g12); const ST h_xz_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g00,g01,g02,g20,g21,g22); const ST h_yx_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g10,g11,g12,g00,g01,g02); const ST h_yy_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g10,g11,g12,g10,g11,g12); const ST h_yz_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g10,g11,g12,g20,g21,g22); const ST h_zx_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g20,g21,g22,g00,g01,g02); const ST h_zy_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g20,g21,g22,g10,g11,g12); const ST h_zz_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g20,g21,g22,g20,g21,g22); grad_grad_psi[psiIndex][0]=signed_one*h_xx_r; grad_grad_psi[psiIndex][1]=signed_one*h_xy_r; grad_grad_psi[psiIndex][2]=signed_one*h_xz_r; grad_grad_psi[psiIndex][3]=signed_one*h_yx_r; grad_grad_psi[psiIndex][4]=signed_one*h_yy_r; 
grad_grad_psi[psiIndex][5]=signed_one*h_yz_r; grad_grad_psi[psiIndex][6]=signed_one*h_zx_r; grad_grad_psi[psiIndex][7]=signed_one*h_zy_r; grad_grad_psi[psiIndex][8]=signed_one*h_zz_r; } } template<typename VV, typename GV, typename GGV, typename GGGV> void assign_vghgh(int bc_sign, VV& psi, GV& dpsi, GGV& grad_grad_psi, GGGV& grad_grad_grad_psi, int first = 0, int last = -1) const { // protect last last = last<0 ? kPoints.size() : (last>kPoints.size() ? kPoints.size() : last); const ST signed_one = (bc_sign &1)? -1:1; const ST g00=PrimLattice.G(0), g01=PrimLattice.G(1), g02=PrimLattice.G(2), g10=PrimLattice.G(3), g11=PrimLattice.G(4), g12=PrimLattice.G(5), g20=PrimLattice.G(6), g21=PrimLattice.G(7), g22=PrimLattice.G(8); const ST* restrict g0=myG.data(0); const ST* restrict g1=myG.data(1); const ST* restrict g2=myG.data(2); const ST* restrict h00=myH.data(0); const ST* restrict h01=myH.data(1); const ST* restrict h02=myH.data(2); const ST* restrict h11=myH.data(3); const ST* restrict h12=myH.data(4); const ST* restrict h22=myH.data(5); const ST* restrict gh000=mygH.data(0); const ST* restrict gh001=mygH.data(1); const ST* restrict gh002=mygH.data(2); const ST* restrict gh011=mygH.data(3); const ST* restrict gh012=mygH.data(4); const ST* restrict gh022=mygH.data(5); const ST* restrict gh111=mygH.data(6); const ST* restrict gh112=mygH.data(7); const ST* restrict gh122=mygH.data(8); const ST* restrict gh222=mygH.data(9); //SIMD doesn't work quite right yet. Comment out until further debugging. //#pragma omp simd for (size_t j=first; j<last; ++j) { const ST val_r=myV[j]; //dot(PrimLattice.G,myG[j]) const ST dX_r = g00*g0[j]+g01*g1[j]+g02*g2[j]; const ST dY_r = g10*g0[j]+g11*g1[j]+g12*g2[j]; const ST dZ_r = g20*g0[j]+g21*g1[j]+g22*g2[j]; const size_t psiIndex=j+first_spo; psi[psiIndex] =signed_one*val_r; dpsi[psiIndex][0]=signed_one*dX_r; dpsi[psiIndex][1]=signed_one*dY_r; dpsi[psiIndex][2]=signed_one*dZ_r; //intermediates for computation of hessian. 
\partial_i \partial_j phi in cartesian coordinates. const ST f_xx_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g00,g01,g02,g00,g01,g02); const ST f_xy_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g00,g01,g02,g10,g11,g12); const ST f_xz_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g00,g01,g02,g20,g21,g22); const ST f_yy_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g10,g11,g12,g10,g11,g12); const ST f_yz_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g10,g11,g12,g20,g21,g22); const ST f_zz_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g20,g21,g22,g20,g21,g22); /* const ST h_xx_r=f_xx_r; const ST h_xy_r=f_xy_r+(kX*dY_i+kY*dX_i)-kX*kY*val_r; const ST h_xz_r=f_xz_r+(kX*dZ_i+kZ*dX_i)-kX*kZ*val_r; const ST h_yy_r=f_yy_r+2*kY*dY_i-kY*kY*val_r; const ST h_yz_r=f_yz_r+(kY*dZ_i+kZ*dY_i)-kY*kZ*val_r; const ST h_zz_r=f_zz_r+2*kZ*dZ_i-kZ*kZ*val_r; */ grad_grad_psi[psiIndex][0]=f_xx_r*signed_one; grad_grad_psi[psiIndex][1]=f_xy_r*signed_one; grad_grad_psi[psiIndex][2]=f_xz_r*signed_one; grad_grad_psi[psiIndex][4]=f_yy_r*signed_one; grad_grad_psi[psiIndex][5]=f_yz_r*signed_one; grad_grad_psi[psiIndex][8]=f_zz_r*signed_one; //symmetry: grad_grad_psi[psiIndex][3]=grad_grad_psi[psiIndex][1]; grad_grad_psi[psiIndex][6]=grad_grad_psi[psiIndex][2]; grad_grad_psi[psiIndex][7]=grad_grad_psi[psiIndex][5]; //These are the real and imaginary components of the third SPO derivative. _xxx denotes // third derivative w.r.t. x, _xyz, a derivative with resepect to x,y, and z, and so on. 
const ST f3_xxx_r=t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g00,g01,g02,g00,g01,g02,g00,g01,g02); const ST f3_xxy_r=t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g00,g01,g02,g00,g01,g02,g10,g11,g12); const ST f3_xxz_r=t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g00,g01,g02,g00,g01,g02,g20,g21,g22); const ST f3_xyy_r=t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g00,g01,g02,g10,g11,g12,g10,g11,g12); const ST f3_xyz_r=t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g00,g01,g02,g10,g11,g12,g20,g21,g22); const ST f3_xzz_r=t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g00,g01,g02,g20,g21,g22,g20,g21,g22); const ST f3_yyy_r=t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g10,g11,g12,g10,g11,g12,g10,g11,g12); const ST f3_yyz_r=t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g10,g11,g12,g10,g11,g12,g20,g21,g22); const ST f3_yzz_r=t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g10,g11,g12,g20,g21,g22,g20,g21,g22); const ST f3_zzz_r=t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g20,g21,g22,g20,g21,g22,g20,g21,g22); //Here is where we build up the components of the physical hessian gradient, namely, d^3/dx^3(e^{-ik*r}\phi(r) /* const ST gh_xxx_r= f3_xxx_r + 3*kX*f_xx_i - 3*kX*kX*dX_r - kX*kX*kX*val_i; const ST gh_xxy_r= f3_xxy_r +(kY*f_xx_i+2*kX*f_xy_i) - (kX*kX*dY_r+2*kX*kY*dX_r)-kX*kX*kY*val_i; const ST gh_xxz_r= f3_xxz_r 
+(kZ*f_xx_i+2*kX*f_xz_i) - (kX*kX*dZ_r+2*kX*kZ*dX_r)-kX*kX*kZ*val_i; const ST gh_xyy_r= f3_xyy_r +(2*kY*f_xy_i+kX*f_yy_i) - (2*kX*kY*dY_r+kY*kY*dX_r)-kX*kY*kY*val_i; const ST gh_xyz_r= f3_xyz_r +(kX*f_yz_i+kY*f_xz_i+kZ*f_xy_i)-(kX*kY*dZ_r+kY*kZ*dX_r+kZ*kX*dY_r) - kX*kY*kZ*val_i; const ST gh_xzz_r= f3_xzz_r +(2*kZ*f_xz_i+kX*f_zz_i) - (2*kX*kZ*dZ_r+kZ*kZ*dX_r)-kX*kZ*kZ*val_i; const ST gh_yyy_r= f3_yyy_r + 3*kY*f_yy_i - 3*kY*kY*dY_r - kY*kY*kY*val_i; const ST gh_yyz_r= f3_yyz_r +(kZ*f_yy_i+2*kY*f_yz_i) - (kY*kY*dZ_r+2*kY*kZ*dY_r)-kY*kY*kZ*val_i; const ST gh_yzz_r= f3_yzz_r +(2*kZ*f_yz_i+kY*f_zz_i) - (2*kY*kZ*dZ_r+kZ*kZ*dY_r)-kY*kZ*kZ*val_i; const ST gh_zzz_r= f3_zzz_r + 3*kZ*f_zz_i - 3*kZ*kZ*dZ_r - kZ*kZ*kZ*val_i;*/ //[x][xx] //These are the unique entries grad_grad_grad_psi[psiIndex][0][0]=signed_one*f3_xxx_r; grad_grad_grad_psi[psiIndex][0][1]=signed_one*f3_xxy_r; grad_grad_grad_psi[psiIndex][0][2]=signed_one*f3_xxz_r; grad_grad_grad_psi[psiIndex][0][4]=signed_one*f3_xyy_r; grad_grad_grad_psi[psiIndex][0][5]=signed_one*f3_xyz_r; grad_grad_grad_psi[psiIndex][0][8]=signed_one*f3_xzz_r; //filling in the symmetric terms. 
Filling out the xij terms grad_grad_grad_psi[psiIndex][0][3]=grad_grad_grad_psi[psiIndex][0][1]; grad_grad_grad_psi[psiIndex][0][6]=grad_grad_grad_psi[psiIndex][0][2]; grad_grad_grad_psi[psiIndex][0][7]=grad_grad_grad_psi[psiIndex][0][5]; //Now for everything that's a permutation of the above: grad_grad_grad_psi[psiIndex][1][0]=grad_grad_grad_psi[psiIndex][0][1]; grad_grad_grad_psi[psiIndex][1][1]=grad_grad_grad_psi[psiIndex][0][4]; grad_grad_grad_psi[psiIndex][1][2]=grad_grad_grad_psi[psiIndex][0][5]; grad_grad_grad_psi[psiIndex][1][3]=grad_grad_grad_psi[psiIndex][0][4]; grad_grad_grad_psi[psiIndex][1][6]=grad_grad_grad_psi[psiIndex][0][5]; grad_grad_grad_psi[psiIndex][2][0]=grad_grad_grad_psi[psiIndex][0][2]; grad_grad_grad_psi[psiIndex][2][1]=grad_grad_grad_psi[psiIndex][0][5]; grad_grad_grad_psi[psiIndex][2][2]=grad_grad_grad_psi[psiIndex][0][8]; grad_grad_grad_psi[psiIndex][2][3]=grad_grad_grad_psi[psiIndex][0][5]; grad_grad_grad_psi[psiIndex][2][6]=grad_grad_grad_psi[psiIndex][0][8]; grad_grad_grad_psi[psiIndex][1][4]=signed_one*f3_yyy_r; grad_grad_grad_psi[psiIndex][1][5]=signed_one*f3_yyz_r; grad_grad_grad_psi[psiIndex][1][8]=signed_one*f3_yzz_r; grad_grad_grad_psi[psiIndex][1][7]=grad_grad_grad_psi[psiIndex][1][5]; grad_grad_grad_psi[psiIndex][2][4]=grad_grad_grad_psi[psiIndex][1][5]; grad_grad_grad_psi[psiIndex][2][5]=grad_grad_grad_psi[psiIndex][1][8]; grad_grad_grad_psi[psiIndex][2][7]=grad_grad_grad_psi[psiIndex][1][8]; grad_grad_grad_psi[psiIndex][2][8]=signed_one*f3_zzz_r; } } template<typename VV, typename GV, typename GGV> void evaluate_vgh(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, GGV& grad_grad_psi) { const PointType& r=P.activeR(iat); PointType ru; int bc_sign=convertPos(r,ru); #pragma omp parallel { int first, last; FairDivideAligned(myV.size(), getAlignment<ST>(), omp_get_num_threads(), omp_get_thread_num(), first, last); spline2::evaluate3d_vgh(SplineInst->spline_m,ru,myV,myG,myH,first,last); 
assign_vgh(bc_sign,psi,dpsi,grad_grad_psi,first,last); } } template<typename VV, typename GV, typename GGV, typename GGGV> void evaluate_vghgh(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, GGV& grad_grad_psi, GGGV& grad_grad_grad_psi) { const PointType& r=P.activeR(iat); PointType ru; int bc_sign=convertPos(r,ru); #pragma omp parallel { int first, last; FairDivideAligned(myV.size(), getAlignment<ST>(), omp_get_num_threads(), omp_get_thread_num(), first, last); spline2::evaluate3d_vghgh(SplineInst->spline_m,ru,myV,myG,myH,mygH,first,last); assign_vghgh(bc_sign,psi,dpsi,grad_grad_psi,grad_grad_grad_psi,first,last); } } }; } #endif
requires-4.c
#pragma omp requires unified_shared_memory,unified_address,reverse_offload void foo (void) { #pragma omp target ; } #pragma omp requires unified_shared_memory /* { dg-error "'unified_shared_memory' clause used lexically after first target construct or offloading API" } */ #pragma omp requires unified_address /* { dg-error "'unified_address' clause used lexically after first target construct or offloading API" } */ #pragma omp requires reverse_offload /* { dg-error "'reverse_offload' clause used lexically after first target construct or offloading API" } */ /* { dg-prune-output "not supported yet" } */
convolution_3x3_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available. // // Copyright (C) 2019 BUG1989. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char* kernel = _kernel; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); out0.fill(0); const signed char* kernel0 = (const signed char*)kernel + p * inch * 9; for (int q = 0; q < inch; q++) { int* outptr0 = out0; const signed char* img0 = bottom_blob.channel(q); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w * 2; for (int i = 0; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum0 = 0; sum0 += (int)r0[0] * kernel0[0]; sum0 += (int)r0[1] * kernel0[1]; sum0 += (int)r0[2] * kernel0[2]; sum0 += (int)r1[0] * kernel0[3]; sum0 += (int)r1[1] * kernel0[4]; sum0 += (int)r1[2] * kernel0[5]; sum0 += (int)r2[0] * kernel0[6]; sum0 += (int)r2[1] * kernel0[7]; sum0 += (int)r2[2] * kernel0[8]; *outptr0 += sum0; r0++; r1++; r2++; outptr0++; } r0 += 2; r1 += 2; r2 += 2; } kernel0 += 9; } } } static void conv3x3s1_winograd23_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch) { kernel_tm.create(4 * 4, 
 inch, outch, 2ul);

    // G
    const short ktm[4][3] = {
        {2, 0, 0},
        {1, 1, 1},
        {1, -1, 1},
        {0, 0, 2}
    };

    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h = G * g  (4x3 intermediate)
            short tmp[4][3];
            for (int i = 0; i < 4; i++)
            {
                tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G^T  (4x4 transformed kernel tile)
            for (int j = 0; j < 4; j++)
            {
                short* tmpp = &tmp[j][0];

                for (int i = 0; i < 4; i++)
                {
                    kernel_tm0[j * 4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }
}

// Winograd F(2,3) int8 3x3 stride-1 convolution: pad -> input transform ->
// per-tile element-wise dot -> output transform -> crop.  Output is int32.
static void conv3x3s1_winograd23_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 2n+2, winograd F(2,3)
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 1) / 2 * 2;
    outh = (outh + 1) / 2 * 2;

    w = outw + 2;
    h = outh + 2;

    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);

    // BEGIN transform input: each 4x4 input tile (stride 2) -> B^T d B,
    // stored as a 16-element int16 row per tile.
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 4;

        const int tiles = nColBlocks * nRowBlocks;

        bottom_blob_tm.create(4 * 4, tiles, inch, 2u, opt.workspace_allocator);

        // BT
        // const float itm[4][4] = {
        //     {1.0f,  0.0f, -1.0f,  0.0f},
        //     {0.0f,  1.0f,  1.00f, 0.0f},
        //     {0.0f, -1.0f,  1.00f, 0.0f},
        //     {0.0f, -1.0f,  0.00f, 1.0f}
        // };

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const signed char* img = bottom_blob_bordered.channel(q);
            short* out_tm0 = bottom_blob_tm.channel(q);

            for (int j = 0; j < nColBlocks; j++)
            {
                // Four input rows covering one tile row (2-pixel vertical step).
                const signed char* r0 = img + w * j * 2;
                const signed char* r1 = r0 + w;
                const signed char* r2 = r1 + w;
                const signed char* r3 = r2 + w;

                for (int i = 0; i < nRowBlocks; i++)
                {
                    short d0[4], d1[4], d2[4], d3[4];
                    short w0[4], w1[4], w2[4], w3[4];
                    short t0[4], t1[4], t2[4], t3[4];

                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = d0[n] - d2[n];
                        w1[n] = d1[n] + d2[n];
                        w2[n] = d2[n] - d1[n];
                        w3[n] = d3[n] - d1[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0] = w0[0]; t1[0] = w0[1]; t2[0] = w0[2]; t3[0] = w0[3];
                        t0[1] = w1[0]; t1[1] = w1[1]; t2[1] = w1[2]; t3[1] = w1[3];
                        t0[2] = w2[0]; t1[2] = w2[1]; t2[2] = w2[2]; t3[2] = w2[3];
                        t0[3] = w3[0]; t1[3] = w3[1]; t2[3] = w3[2]; t3[3] = w3[3];
                    }
                    // U = B_t * d_t
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = t0[n] - t2[n];
                        d1[n] = t1[n] + t2[n];
                        d2[n] = t2[n] - t1[n];
                        d3[n] = t3[n] - t1[n];
                    }
                    // save to out_tm
                    for (int n = 0; n < 4; n++)
                    {
                        out_tm0[n] = d0[n];
                        out_tm0[n + 4] = d1[n];
                        out_tm0[n + 8] = d2[n];
                        out_tm0[n + 12] = d3[n];
                    }

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;

                    out_tm0 += 16;
                }
            }
        }
    }
    // Release the padded copy early; only the transformed tiles are needed.
    bottom_blob_bordered = Mat();

    // BEGIN dot: per tile, elementwise multiply-accumulate of the 16
    // transformed input values with the 16 transformed kernel values,
    // summed over input channels into int32.
    Mat top_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 4;

        const int tiles = nColBlocks * nRowBlocks;

        top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);

        // Process output channels four at a time, then the remainder.
        int nn_outch = outch >> 2;
        int remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * 4;

            Mat out0_tm = top_blob_tm.channel(p);
            Mat out1_tm = top_blob_tm.channel(p + 1);
            Mat out2_tm = top_blob_tm.channel(p + 2);
            Mat out3_tm = top_blob_tm.channel(p + 3);

            const Mat kernel0_tm = kernel_tm.channel(p);
            const Mat kernel1_tm = kernel_tm.channel(p + 1);
            const Mat kernel2_tm = kernel_tm.channel(p + 2);
            const Mat kernel3_tm = kernel_tm.channel(p + 3);

            for (int i = 0; i < tiles; i++)
            {
                int* output0_tm = out0_tm.row<int>(i);
                int* output1_tm = out1_tm.row<int>(i);
                int* output2_tm = out2_tm.row<int>(i);
                int* output3_tm = out3_tm.row<int>(i);

                int sum0[16] = {0};
                int sum1[16] = {0};
                int sum2[16] = {0};
                int sum3[16] = {0};

                int q = 0;
                // Unroll input channels by 4; the k pointers step by one
                // 16-element row (+= 16) per channel and are rewound after.
                for (; q + 3 < inch; q += 4)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* r1 = bottom_blob_tm.channel(q + 1).row<short>(i);
                    const short* r2 = bottom_blob_tm.channel(q + 2).row<short>(i);
                    const short* r3 = bottom_blob_tm.channel(q + 3).row<short>(i);

                    const short* k0 = kernel0_tm.row<short>(q);
                    const short* k1 = kernel1_tm.row<short>(q);
                    const short* k2 = kernel2_tm.row<short>(q);
                    const short* k3 = kernel3_tm.row<short>(q);

                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                        k0 += 16;
                        sum0[n] += (int)r1[n] * k0[n];
                        k0 += 16;
                        sum0[n] += (int)r2[n] * k0[n];
                        k0 += 16;
                        sum0[n] += (int)r3[n] * k0[n];
                        k0 -= 16 * 3;

                        sum1[n] += (int)r0[n] * k1[n];
                        k1 += 16;
                        sum1[n] += (int)r1[n] * k1[n];
                        k1 += 16;
                        sum1[n] += (int)r2[n] * k1[n];
                        k1 += 16;
                        sum1[n] += (int)r3[n] * k1[n];
                        k1 -= 16 * 3;

                        sum2[n] += (int)r0[n] * k2[n];
                        k2 += 16;
                        sum2[n] += (int)r1[n] * k2[n];
                        k2 += 16;
                        sum2[n] += (int)r2[n] * k2[n];
                        k2 += 16;
                        sum2[n] += (int)r3[n] * k2[n];
                        k2 -= 16 * 3;

                        sum3[n] += (int)r0[n] * k3[n];
                        k3 += 16;
                        sum3[n] += (int)r1[n] * k3[n];
                        k3 += 16;
                        sum3[n] += (int)r2[n] * k3[n];
                        k3 += 16;
                        sum3[n] += (int)r3[n] * k3[n];
                        k3 -= 16 * 3;
                    }
                }

                // Remaining input channels, one at a time.
                for (; q < inch; q++)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);

                    const short* k0 = kernel0_tm.row<short>(q);
                    const short* k1 = kernel1_tm.row<short>(q);
                    const short* k2 = kernel2_tm.row<short>(q);
                    const short* k3 = kernel3_tm.row<short>(q);

                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                        sum1[n] += (int)r0[n] * k1[n];
                        sum2[n] += (int)r0[n] * k2[n];
                        sum3[n] += (int)r0[n] * k3[n];
                    }
                }

                for (int n = 0; n < 16; n++)
                {
                    output0_tm[n] = sum0[n];
                    output1_tm[n] = sum1[n];
                    output2_tm[n] = sum2[n];
                    output3_tm[n] = sum3[n];
                }
            }
        }

        // Leftover output channels (outch not a multiple of 4).
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_outch_start; p < outch; p++)
        {
            Mat out0_tm = top_blob_tm.channel(p);
            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int i = 0; i < tiles; i++)
            {
                int* output0_tm = out0_tm.row<int>(i);

                int sum0[16] = {0};

                int q = 0;
                for (; q + 3 < inch; q += 4)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* r1 = bottom_blob_tm.channel(q + 1).row<short>(i);
                    const short* r2 = bottom_blob_tm.channel(q + 2).row<short>(i);
                    const short* r3 = bottom_blob_tm.channel(q + 3).row<short>(i);

                    const short* k0 = kernel0_tm.row<short>(q);
                    const short* k1 = kernel0_tm.row<short>(q + 1);
                    const short* k2 = kernel0_tm.row<short>(q + 2);
                    const short* k3 = kernel0_tm.row<short>(q + 3);

                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                        sum0[n] += (int)r1[n] * k1[n];
                        sum0[n] += (int)r2[n] * k2[n];
                        sum0[n] += (int)r3[n] * k3[n];
                    }
                }

                for (; q < inch; q++)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* k0 = kernel0_tm.row<short>(q);

                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                    }
                }

                for (int n = 0; n < 16; n++)
                {
                    output0_tm[n] = sum0[n];
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output: each 4x4 dot tile -> 2x2 output block
    // via A^T M A.
    Mat top_blob_bordered;
    top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    {
        // AT
        // const float itm[2][4] = {
        //     {1.0f, 1.0f,  1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 1.0f}
        // };

        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 4;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            Mat out_tm = top_blob_tm.channel(p);
            Mat out = top_blob_bordered.channel(p);

            for (int j = 0; j < nColBlocks; j++)
            {
                int* outRow0 = out.row<int>(j * 2);
                int* outRow1 = out.row<int>(j * 2 + 1);

                for (int i = 0; i < nRowBlocks; i++)
                {
                    int* out_tile = out_tm.row<int>(j * nRowBlocks + i);

                    int s0[4], s1[4], s2[4], s3[4];
                    int w0[4], w1[4];
                    int d0[2], d1[2], d2[2], d3[2];
                    int o0[2], o1[2];
                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n + 4];
                        s2[n] = out_tile[n + 8];
                        s3[n] = out_tile[n + 12];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n];
                        w1[n] = s1[n] - s2[n] + s3[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0]; d0[1] = w1[0];
                        d1[0] = w0[1]; d1[1] = w1[1];
                        d2[0] = w0[2]; d2[1] = w1[2];
                        d3[0] = w0[3]; d3[1] = w1[3];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 2; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n];
                        o1[n] = d1[n] - d2[n] + d3[n];
                    }
                    // save to top blob tm,why right 2,because the G' = G*2
                    outRow0[0] = o0[0] >> 2;
                    outRow0[1] = o0[1] >> 2;
                    outRow1[0] = o1[0] >> 2;
                    outRow1[1] = o1[1] >> 2;

                    outRow0 += 2;
                    outRow1 += 2;
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// Transform 3x3 int8 kernels into the 6x6 Winograd F(4,3) domain.
// Integer G is the float G scaled by 24 (hence /576 = 1/24^2 in the
// output transform of conv3x3s1_winograd43_int8_sse).
static void conv3x3s1_winograd43_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(6 * 6, inch, outch, 2ul);

    // G
    // const float ktm[6][3] = {
    //     {  1.0f/4,     0.0f,    0.0f},
    //     { -1.0f/6,  -1.0f/6, -1.0f/6},
    //     { -1.0f/6,   1.0f/6, -1.0f/6},
    //     { 1.0f/24,  1.0f/12,  1.0f/6},
    //     { 1.0f/24, -1.0f/12,  1.0f/6},
    //     {    0.0f,     0.0f,    1.0f}
    // };
    const short ktm[6][3] = {
        {6, 0, 0},
        {-4, -4, -4},
        {-4, 4, -4},
        {1, 2, 4},
        {1, -2, 4},
        {0, 0, 24}
    };

    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h = G * g
            short tmp[6][3];
            for
 (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G^T  (6x6 transformed kernel tile)
            for (int j = 0; j < 6; j++)
            {
                short* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }
}

// Winograd F(4,3) int8 3x3 stride-1 convolution: pad -> 6x6 input transform
// -> per-tile dot -> 4x4 output transform (with /576 rescale) -> crop.
static void conv3x3s1_winograd43_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2, winograd F(4,3)
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;

    w = outw + 2;
    h = outh + 2;

    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);

    // BEGIN transform input: each 6x6 tile (stride 4) -> B^T d B.
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        const int tiles = nColBlocks * nRowBlocks;

        bottom_blob_tm.create(6 * 6, tiles, inch, 2u, opt.workspace_allocator);

        // BT
        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 =  4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r03 + r04
        // 2 =  4 * (r01 - r02) - r03 + r04
        // 3 = -2 * r01 - r02 + 2 * r03 + r04
        // 4 =  2 * r01 - r02 - 2 * r03 + r04
        // 5 =  4 * r01 - 5 * r03 + r05

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const signed char* img = bottom_blob_bordered.channel(q);
            short* out_tm0 = bottom_blob_tm.channel(q);

            for (int j = 0; j < nColBlocks; j++)
            {
                // Six input rows covering one tile row (4-pixel vertical step).
                const signed char* r0 = img + w * j * 4;
                const signed char* r1 = r0 + w;
                const signed char* r2 = r1 + w;
                const signed char* r3 = r2 + w;
                const signed char* r4 = r3 + w;
                const signed char* r5 = r4 + w;

                for (int i = 0; i < nRowBlocks; i++)
                {
                    short d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
                    short w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
                    short t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];

                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                        d4[n] = r4[n];
                        d5[n] = r5[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
                        w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
                        w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
                        w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
                        w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
                        w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0] = w0[0]; t1[0] = w0[1]; t2[0] = w0[2]; t3[0] = w0[3]; t4[0] = w0[4]; t5[0] = w0[5];
                        t0[1] = w1[0]; t1[1] = w1[1]; t2[1] = w1[2]; t3[1] = w1[3]; t4[1] = w1[4]; t5[1] = w1[5];
                        t0[2] = w2[0]; t1[2] = w2[1]; t2[2] = w2[2]; t3[2] = w2[3]; t4[2] = w2[4]; t5[2] = w2[5];
                        t0[3] = w3[0]; t1[3] = w3[1]; t2[3] = w3[2]; t3[3] = w3[3]; t4[3] = w3[4]; t5[3] = w3[5];
                        t0[4] = w4[0]; t1[4] = w4[1]; t2[4] = w4[2]; t3[4] = w4[3]; t4[4] = w4[4]; t5[4] = w4[5];
                        t0[5] = w5[0]; t1[5] = w5[1]; t2[5] = w5[2]; t3[5] = w5[3]; t4[5] = w5[4]; t5[5] = w5[5];
                    }
                    // d = B_t * d_t
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
                        d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
                        d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
                        d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
                        d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
                        d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
                    }
                    // save to out_tm
                    for (int n = 0; n < 6; n++)
                    {
                        out_tm0[n] = d0[n];
                        out_tm0[n + 6] = d1[n];
                        out_tm0[n + 12] = d2[n];
                        out_tm0[n + 18] = d3[n];
                        out_tm0[n + 24] = d4[n];
                        out_tm0[n + 30] = d5[n];
                    }

                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;

                    out_tm0 += 36;
                }
            }
        }
    }
    bottom_blob_bordered = Mat();

    // BEGIN dot: per-tile elementwise MAC of the 36 transformed input and
    // kernel values, summed over input channels into int32.
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        const int tiles = nColBlocks * nRowBlocks;

        top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            Mat out0_tm = top_blob_tm.channel(p);
            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int i = 0; i < tiles; i++)
            {
                int* output0_tm = out0_tm.row<int>(i);

                int sum0[36] = {0};

                for (int q = 0; q < inch; q++)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* k0 = kernel0_tm.row<short>(q);

                    for (int n = 0; n < 36; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                    }
                }

                for (int n = 0; n < 36; n++)
                {
                    output0_tm[n] = sum0[n];
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output: each 6x6 dot tile -> 4x4 output block via
    // A^T M A, then rescale by 1/576 (kernel transform used G scaled by 24).
    Mat top_blob_bordered;
    top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    {
        // AT
        // const float itm[4][6] = {
        //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };

        // 0 = r00 + r01 + r02 + r03 + r04
        // 1 = r01 - r02 + 2 * (r03 - r04)
        // 2 = r01 + r02 + 4 * (r03 + r04)
        // 3 = r01 - r02 + 8 * (r03 - r04) + r05

        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            Mat out_tm = top_blob_tm.channel(p);
            Mat out = top_blob_bordered.channel(p);

            for (int j = 0; j < nColBlocks; j++)
            {
                int* outRow0 = out.row<int>(j * 4);
                int* outRow1 = out.row<int>(j * 4 + 1);
                int* outRow2 = out.row<int>(j * 4 + 2);
                int* outRow3 = out.row<int>(j * 4 + 3);

                for (int i = 0; i < nRowBlocks; i++)
                {
                    int* out_tile = out_tm.row<int>(j * nRowBlocks + i);

                    int s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
                    int w0[6], w1[6], w2[6], w3[6];
                    int d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
                    int o0[4], o1[4], o2[4], o3[4];

                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n + 6];
                        s2[n] = out_tile[n + 12];
                        s3[n] = out_tile[n + 18];
                        s4[n] = out_tile[n + 24];
                        s5[n] = out_tile[n + 30];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
                        w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
                        w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
                        w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0];
                        d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1];
                        d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2];
                        d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3];
                        d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4];
                        d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 4; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
                        o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
                        o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
                        o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
                    }
                    // save to top blob tm
                    for (int n = 0; n < 4; n++)
                    {
                        outRow0[n] = o0[n] / 576;
                        outRow1[n] = o1[n] / 576;
                        outRow2[n] = o2[n] / 576;
                        outRow3[n] = o3[n] / 576;
                    }

                    outRow0 += 4;
                    outRow1 += 4;
                    outRow2 += 4;
                    outRow3 += 4;
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// Reference 3x3 stride-2 int8 convolution; same accumulation scheme as
// conv3x3s1_int8_sse but the window advances 2 pixels per output and
// tailstep skips the remainder of the two consumed input rows.
static void conv3x3s2_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Horizontal bytes left after outw stride-2 steps, plus one full row.
    const int tailstep = w - 2 * outw + w;

    const signed char* kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        out0.fill(0);

        const signed char* kernel0 = (const signed char*)kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int* outptr0 = out0;

            const signed char* img0 = bottom_blob.channel(q);

            const signed char* r0 = img0;
            const signed char* r1 = img0 + w;
            const signed char* r2 = img0 + w * 2;

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    int sum0 = 0;

                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];

                    *outptr0 += sum0;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            kernel0 += 9;
        }
    }
}
GB_binop__isgt_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isgt_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_01__isgt_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__isgt_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_03__isgt_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isgt_uint8)
// A*D function (colscale):         GB (_AxD__isgt_uint8)
// D*A function (rowscale):         GB (_DxB__isgt_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__isgt_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__isgt_uint8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isgt_uint8)
// C=scalar+B                       GB (_bind1st__isgt_uint8)
// C=scalar+B'                      GB (_bind1st_tran__isgt_uint8)
// C=A+scalar                       GB (_bind2nd__isgt_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__isgt_uint8)

// C type:   uint8_t
// A type:   uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij > bij)

// These macros parameterize the shared template files included below for the
// ISGT operator on uint8_t ("is greater than", result 0 or 1 as uint8_t).

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x > y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGT || GxB_NO_UINT8 || GxB_NO_ISGT_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isgt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isgt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isgt_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable; the generator emits a second return after
    // the braced block above.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isgt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isgt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isgt_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__isgt_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isgt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isgt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isgt_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isgt_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isgt_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x > aij) ; \
}

GrB_Info GB (_bind1st_tran__isgt_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB (_bind2nd_tran__isgt_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unop__erfc_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__erfc_fp32_fp32)
// op(A') function:  GB (_unop_tran__erfc_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = erfcf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: complementary error function (single precision)
#define GB_OP(z, x) \
    z = erfcf (x) ;

// casting (fp32 -> fp32, a no-op copy)
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    float aij = Ax [pA] ;           \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ;                 \
    Cx [pC] = erfcf (z) ;           \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ERFC || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__erfc_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = erfcf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = erfcf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__erfc_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__identity_int32_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int32_fp32
// op(A') function:  GB_tran__identity_int32_fp32

// C type:   int32_t
// A type:   float
// cast:     int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop:  cij = aij

#define GB_ATYPE \
    float

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: identity (the typecast does all the work)
#define GB_OP(z, x) \
    z = x ;

// casting: fp32 -> int32, saturating signed cast
#define GB_CASTING(z, aij) \
    int32_t z ; GB_CAST_SIGNED(z,aij,32) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)            \
{                                    \
    /* aij = Ax [pA] */              \
    GB_GETA (aij, Ax, pA) ;          \
    /* Cx [pC] = op (cast (aij)) */  \
    GB_CASTING (z, aij) ;            \
    GB_OP (GB_CX (pC), z) ;          \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_int32_fp32
(
    int32_t *Cx,        // Cx and Ax may be aliased
    float *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_int32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ParallelOpenMP.h
#pragma once #include <ATen/ATen.h> #include <cstddef> #include <exception> #ifdef _OPENMP #define INTRA_OP_PARALLEL #include <omp.h> #endif namespace at { template <class F> inline void parallel_for( const int64_t begin, const int64_t end, const int64_t grain_size, const F& f) { TORCH_CHECK(grain_size >= 0); if (begin >= end) { return; } #ifdef _OPENMP std::atomic_flag err_flag = ATOMIC_FLAG_INIT; std::exception_ptr eptr; // choose number of tasks based on grain size and number of threads int64_t num_threads = omp_in_parallel() ? 1 : omp_get_max_threads(); if (grain_size > 0) { num_threads = std::min(num_threads, divup((end - begin), grain_size)); } #pragma omp parallel num_threads(num_threads) { int64_t num_threads = omp_get_num_threads(); int64_t tid = omp_get_thread_num(); int64_t chunk_size = divup((end - begin), num_threads); int64_t begin_tid = begin + tid * chunk_size; if (begin_tid < end) { try { f(begin_tid, std::min(end, chunk_size + begin_tid)); } catch (...) { if (!err_flag.test_and_set()) { eptr = std::current_exception(); } } } } if (eptr) { std::rethrow_exception(eptr); } #else f(begin, end); #endif } template <class scalar_t, class F, class SF> inline scalar_t parallel_reduce( const int64_t begin, const int64_t end, const int64_t grain_size, const scalar_t ident, const F& f, const SF& sf) { TORCH_CHECK(grain_size >= 0); if (begin >= end) { return ident; } else if (in_parallel_region() || get_num_threads() == 1) { return f(begin, end, ident); } else { const int64_t num_results = divup((end - begin), grain_size); std::vector<scalar_t> results(num_results); scalar_t* results_data = results.data(); std::atomic_flag err_flag = ATOMIC_FLAG_INIT; std::exception_ptr eptr; #pragma omp parallel for if ((end - begin) >= grain_size) for (int64_t id = 0; id < num_results; id++) { int64_t i = begin + id * grain_size; try { results_data[id] = f(i, i + std::min(end - i, grain_size), ident); } catch (...) 
{ if (!err_flag.test_and_set()) { eptr = std::current_exception(); } } } if (eptr) { std::rethrow_exception(eptr); } scalar_t result = ident; for (auto partial_result : results) { result = sf(result, partial_result); } return result; } } } // namespace at
lagrangian_particle_utilities.h
/* ============================================================================== KratosTestApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2010 Pooyan Dadvand, Riccardo Rossi pooyan@cimne.upc.edu rrossi@cimne.upc.edu - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
============================================================================== */ // // Project Name: Kratos // Last Modified by: $Author: rrossi $ // Date: $Date: 2007-03-06 10:30:31 $ // Revision: $Revision: 1.2 $ // // #if !defined(KRATOS_LAGRANGIAN_PARTICLES_UTILITIES_INCLUDED ) #define KRATOS_LAGRANGIAN_PARTICLES_UTILITIES_INCLUDED #define PRESSURE_ON_EULERIAN_MESH #define USE_FEW_PARTICLES // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" #include "includes/cfd_variables.h" #include "utilities/geometry_utilities.h" #include "geometries/tetrahedra_3d_4.h" #include "incompressible_fluid_application.h" #include "spatial_containers/spatial_containers.h" #include "utilities/timer.h" #include "processes/node_erase_process.h" #include "utilities/binbased_fast_point_locator.h" namespace Kratos { template< class T, std::size_t dim > class DistanceCalculator { public: double operator()(T const& p1, T const& p2) { double dist = 0.0; for (std::size_t i = 0; i < dim; i++) { double tmp = p1[i] - p2[i]; dist += tmp*tmp; } return dist; //square distance because it is easier to work without the square root// } }; template<std::size_t TDim> class LagrangianParticleUtils { public: KRATOS_CLASS_POINTER_DEFINITION(LagrangianParticleUtils<TDim>); //********************************************************************************************** //********************************************************************************************** void BackAndForth(array_1d<double, 3 > & body_force, const double density, const double dt, const double subdivisions, ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart) { KRATOS_TRY //clear lagrangian model par and reseed it rLagrangianModelPart.Nodes().clear(); Reseed(rEulerianModelPart, rLagrangianModelPart); double density_inverse = 1.0 / density; //defintions for spatial 
search typedef Node < 3 > PointType; typedef Node < 3 > ::Pointer PointTypePointer; typedef std::vector<PointType::Pointer> PointVector; typedef std::vector<PointType::Pointer>::iterator PointIterator; typedef std::vector<double> DistanceVector; typedef std::vector<double>::iterator DistanceIterator; //creating an auxiliary list for the new nodes PointVector list_of_nodes; //************* // Bucket types typedef Bucket< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > BucketType; // typedef Bins< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > StaticBins; // typedef BinsDynamic< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > DynamicBins; //************* // DynamicBins; typedef Tree< KDTreePartition<BucketType> > tree; //Kdtree; // typedef Tree< OCTreePartition<BucketType> > tree; //Octree; // typedef Tree< StaticBins > tree; //Binstree; // typedef Tree< KDTreePartition<StaticBins> > tree; //KdtreeBins; // typedef typename KdtreeBins::Partitions SubPartitions; // typedef Tree< OCTreePartition<StaticBins> > tree; //OctreeBins; /* typedef Bins< TDim, PointType, stdPointVector> stdBins; typedef Tree< Bins<TDim,PointType,stdPointVector> > tree; //stdStaticBins;*/ //starting calculating time of construction of the kdtree boost::timer kdtree_construction; for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin(); node_it != rLagrangianModelPart.NodesEnd(); ++node_it) { PointTypePointer pnode = *(node_it.base()); pnode->Set(TO_ERASE, true); node_it->GetValue(IS_VISITED) = 0; //putting the nodes of the destination_model part in an auxiliary list list_of_nodes.push_back(pnode); // //reset the position to the position at the end of the step // array_1d<double, 3 > & old_disp = (node_it)->FastGetSolutionStepValue(DISPLACEMENT, 1); // (node_it)->FastGetSolutionStepValue(DISPLACEMENT) = old_disp; // // (node_it)->X() = (node_it)->X0() + 
old_disp[0]; // (node_it)->Y() = (node_it)->Y0() + old_disp[1]; // (node_it)->Z() = (node_it)->Z0() + old_disp[2]; } std::cout << "kdt constructin time " << kdtree_construction.elapsed() << std::endl; //work arrays Node < 3 > work_point(0, 0.0, 0.0, 0.0); unsigned int MaximumNumberOfResults = 10000; PointVector Results(MaximumNumberOfResults); DistanceVector SquaredResultsDistances(MaximumNumberOfResults); array_1d<double, TDim + 1 > N; //Shape functions vector// array_1d<double, TDim + 1 > pressures; //Shape functions vector// boost::numeric::ublas::bounded_matrix<double, TDim + 1, TDim> DN_DX; array_1d<double, TDim> gradp; array_1d<double, 3 > acc_particle; array_1d<double, 3 > veulerian; //create a spatial database with the list of new nodes unsigned int bucket_size = 20; double small_dt = dt / subdivisions; for (unsigned int substep = 0; substep < subdivisions; substep++) { //compute the tree with the position of the nodes tree nodes_tree(list_of_nodes.begin(), list_of_nodes.end(), bucket_size); //loop over all of the elements in the eulerian mesh to perform the interpolation for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { Geometry<Node < 3 > >&geom = el_it->GetGeometry(); //find the center and "radius" of the element double xc, yc, zc, radius; CalculateCenterAndSearchRadius(geom, xc, yc, zc, radius, N); work_point.X() = xc; work_point.Y() = yc; work_point.Z() = zc; //find all of the new nodes within the radius int number_of_points_in_radius; //look between the new nodes which of them is inside the radius of the circumscribed cyrcle number_of_points_in_radius = nodes_tree.SearchInRadius(work_point, radius, Results.begin(), SquaredResultsDistances.begin(), MaximumNumberOfResults); if (number_of_points_in_radius > 0) { //check if inside for (PointIterator it_found = Results.begin(); it_found != Results.begin() + number_of_points_in_radius; it_found++) { bool 
is_inside = false; is_inside = CalculatePosition(geom, (*it_found)->X(), (*it_found)->Y(), (*it_found)->Z(), N); if (is_inside == true && (*it_found)->GetValue(IS_VISITED) == 0) { // KRATOS_WATCH("219") (*it_found)->GetValue(IS_VISITED) = 1; //move according to the streamline noalias(veulerian) = N[0] * geom[0].FastGetSolutionStepValue(VELOCITY, 1); for (unsigned int k = 1; k < geom.size(); k++) noalias(veulerian) += N[k] * geom[k].FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3 > & disp = (*it_found)->FastGetSolutionStepValue(DISPLACEMENT); noalias(disp) -= small_dt*veulerian; array_1d<double, 3 > & vel_particle = (*it_found)->FastGetSolutionStepValue(VELOCITY); noalias(vel_particle) = veulerian; } } } } //position is to be updated only after all of the searches! // std::cout << "substep= " << substep << (rLagrangianModelPart.NodesBegin()+200)->FastGetSolutionStepValue(DISPLACEMENT) << std::endl; for (ModelPart::NodesContainerType::iterator it = rLagrangianModelPart.NodesBegin(); it != rLagrangianModelPart.NodesEnd(); it++) { noalias(it->Coordinates()) = it->GetInitialPosition(); noalias(it->Coordinates()) += it->FastGetSolutionStepValue(DISPLACEMENT); (it)->GetValue(IS_VISITED) = 0; } } //now go forth (computing the acceleration) for (unsigned int substep = 0; substep < subdivisions; substep++) { //compute the tree with the position of the nodes tree nodes_tree(list_of_nodes.begin(), list_of_nodes.end(), bucket_size); //loop over all of the elements in the eulerian mesh to perform the interpolation for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { Geometry<Node < 3 > >&geom = el_it->GetGeometry(); //find the center and "radius" of the element double xc, yc, zc, radius; CalculateCenterAndSearchRadius(geom, xc, yc, zc, radius, N); work_point.X() = xc; work_point.Y() = yc; work_point.Z() = zc; //find all of the new nodes within the radius int 
number_of_points_in_radius; //look between the new nodes which of them is inside the radius of the circumscribed cyrcle number_of_points_in_radius = nodes_tree.SearchInRadius(work_point, radius, Results.begin(), SquaredResultsDistances.begin(), MaximumNumberOfResults); if (number_of_points_in_radius > 0) { //check if inside for (PointIterator it_found = Results.begin(); it_found != Results.begin() + number_of_points_in_radius; it_found++) { bool is_inside = false; is_inside = CalculatePosition(geom, (*it_found)->X(), (*it_found)->Y(), (*it_found)->Z(), N); if (is_inside == true && (*it_found)->GetValue(IS_VISITED) == 0) { (*it_found)->GetValue(IS_VISITED) = 1; //move according to the streamline noalias(veulerian) = N[0] * geom[0].FastGetSolutionStepValue(VELOCITY, 1); for (unsigned int k = 1; k < geom.size(); k++) noalias(veulerian) += N[k] * geom[k].FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3 > & disp = (*it_found)->FastGetSolutionStepValue(DISPLACEMENT); noalias(disp) += small_dt*veulerian; //compute particle velocity noalias(acc_particle) = body_force - N[0] * geom[0].FastGetSolutionStepValue(PRESS_PROJ) * density_inverse; for (unsigned int k = 1; k < geom.size(); k++) noalias(acc_particle) -= N[k] * geom[k].FastGetSolutionStepValue(PRESS_PROJ) * density_inverse; array_1d<double, 3 > & vel_particle = (*it_found)->FastGetSolutionStepValue(VELOCITY); noalias(vel_particle) += small_dt*acc_particle; } } } } //position is to be updated only after all of the searches! 
// std::cout << "substep= " << substep << (rLagrangianModelPart.NodesBegin()+200)->FastGetSolutionStepValue(DISPLACEMENT) << std::endl; for (ModelPart::NodesContainerType::iterator it = rLagrangianModelPart.NodesBegin(); it != rLagrangianModelPart.NodesEnd(); it++) { noalias(it->Coordinates()) = it->GetInitialPosition(); noalias(it->Coordinates()) += it->FastGetSolutionStepValue(DISPLACEMENT); (it)->GetValue(IS_VISITED) = 0; } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** void ConvectParticles(const double dt, const double subdivisions, ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, bool use_eulerian_velocity) { KRATOS_TRY //defintions for spatial search typedef Node < 3 > PointType; typedef Node < 3 > ::Pointer PointTypePointer; typedef std::vector<PointType::Pointer> PointVector; typedef std::vector<PointType::Pointer>::iterator PointIterator; typedef std::vector<double> DistanceVector; typedef std::vector<double>::iterator DistanceIterator; //creating an auxiliary list for the new nodes PointVector list_of_nodes; //************* // Bucket types typedef Bucket< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > BucketType; // typedef Bins< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > StaticBins; // typedef BinsDynamic< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > DynamicBins; //************* // DynamicBins; typedef Tree< KDTreePartition<BucketType> > tree; //Kdtree; // typedef Tree< OCTreePartition<BucketType> > tree; //Octree; // typedef Tree< StaticBins > tree; //Binstree; // typedef Tree< KDTreePartition<StaticBins> > tree; //KdtreeBins; // typedef typename KdtreeBins::Partitions SubPartitions; // typedef Tree< OCTreePartition<StaticBins> > tree; //OctreeBins; /* typedef 
Bins< TDim, PointType, stdPointVector> stdBins; typedef Tree< Bins<TDim,PointType,stdPointVector> > tree; //stdStaticBins;*/ //starting calculating time of construction of the kdtree boost::timer kdtree_construction; for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin(); node_it != rLagrangianModelPart.NodesEnd(); ++node_it) { PointTypePointer pnode = *(node_it.base()); pnode->Set(TO_ERASE, true); node_it->GetValue(IS_VISITED) = 0; //putting the nodes of the destination_model part in an auxiliary list list_of_nodes.push_back(pnode); //reset the position to the position at the end of the step array_1d<double, 3 > & old_disp = (node_it)->FastGetSolutionStepValue(DISPLACEMENT, 1); (node_it)->FastGetSolutionStepValue(DISPLACEMENT) = old_disp; (node_it)->X() = (node_it)->X0() + old_disp[0]; (node_it)->Y() = (node_it)->Y0() + old_disp[1]; (node_it)->Z() = (node_it)->Z0() + old_disp[2]; } std::cout << "kdt constructin time " << kdtree_construction.elapsed() << std::endl; //work arrays Node < 3 > work_point(0, 0.0, 0.0, 0.0); unsigned int MaximumNumberOfResults = 10000; PointVector Results(MaximumNumberOfResults); DistanceVector SquaredResultsDistances(MaximumNumberOfResults); array_1d<double, TDim + 1 > N; //Shape functions vector// array_1d<double, TDim + 1 > pressures; //Shape functions vector// boost::numeric::ublas::bounded_matrix<double, TDim + 1, TDim> DN_DX; //create a spatial database with the list of new nodes unsigned int bucket_size = 20; double small_dt = dt / subdivisions; for (unsigned int substep = 0; substep < subdivisions; substep++) { //compute the tree with the position of the nodes tree nodes_tree(list_of_nodes.begin(), list_of_nodes.end(), bucket_size); //loop over all of the elements in the eulerian mesh to perform the interpolation for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { Geometry<Node < 3 > >&geom = 
el_it->GetGeometry(); //find the center and "radius" of the element double xc, yc, zc, radius; CalculateCenterAndSearchRadius(geom, xc, yc, zc, radius, N); work_point.X() = xc; work_point.Y() = yc; work_point.Z() = zc; //find all of the new nodes within the radius int number_of_points_in_radius; //look between the new nodes which of them is inside the radius of the circumscribed cyrcle number_of_points_in_radius = nodes_tree.SearchInRadius(work_point, radius, Results.begin(), SquaredResultsDistances.begin(), MaximumNumberOfResults); array_1d<double, 3 > veulerian; if (number_of_points_in_radius > 0) { //check if inside for (PointIterator it_found = Results.begin(); it_found != Results.begin() + number_of_points_in_radius; it_found++) { bool is_inside = false; is_inside = CalculatePosition(geom, (*it_found)->X(), (*it_found)->Y(), (*it_found)->Z(), N); if (is_inside == true && (*it_found)->GetValue(IS_VISITED) == 0) { (*it_found)->GetValue(IS_VISITED) = 1; //move according to the streamline noalias(veulerian) = N[0] * geom[0].FastGetSolutionStepValue(VELOCITY, 1); for (unsigned int k = 1; k < geom.size(); k++) noalias(veulerian) += N[k] * geom[k].FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3 > & disp = (*it_found)->FastGetSolutionStepValue(DISPLACEMENT); noalias(disp) += small_dt*veulerian; (*it_found)->Set(TO_ERASE, false); if (substep == 0 && use_eulerian_velocity == true) { double temperature = N[0] * geom[0].FastGetSolutionStepValue(TEMPERATURE); for (unsigned int k = 1; k < geom.size(); k++) temperature += N[k] * geom[k].FastGetSolutionStepValue(TEMPERATURE); (*it_found)->FastGetSolutionStepValue(TEMPERATURE) = temperature; } } } } } //position is to be updated only after all of the searches! 
// std::cout << "substep= " << substep << (rLagrangianModelPart.NodesBegin()+200)->FastGetSolutionStepValue(DISPLACEMENT) << std::endl; for (ModelPart::NodesContainerType::iterator it = rLagrangianModelPart.NodesBegin(); it != rLagrangianModelPart.NodesEnd(); it++) { noalias(it->Coordinates()) = it->GetInitialPosition(); noalias(it->Coordinates()) += it->FastGetSolutionStepValue(DISPLACEMENT); (it)->GetValue(IS_VISITED) = 0; } } //perform the erase NodeEraseProcess(rLagrangianModelPart).Execute(); KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** void StreamlineMove(array_1d<double, 3 > & body_force, const double density, const double dt, ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, bool use_eulerian_velocity, BinBasedFastPointLocator<TDim>& node_locator) { KRATOS_TRY if (rEulerianModelPart.NodesBegin()->SolutionStepsDataHas(FORCE) == false) KRATOS_THROW_ERROR(std::logic_error, "Add ----FORCE---- variable!!!!!! ERROR", ""); //should be done outside!!! 
// BinBasedFastPointLocator<TDim> node_locator(rEulerianModelPart); // node_locator.UpdateSearchDatabase(); double density_inverse = 1.0 / density; //reset particle position to the beginning of the step for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin(); node_it != rLagrangianModelPart.NodesEnd(); ++node_it) { Node < 3 > ::Pointer pnode = *(node_it.base()); pnode->Set(TO_ERASE, true); node_it->GetValue(IS_VISITED) = 0; //reset the position to the position at the end of the step const array_1d<double, 3 > & old_disp = (node_it)->FastGetSolutionStepValue(DISPLACEMENT, 1); noalias((node_it)->FastGetSolutionStepValue(DISPLACEMENT)) = old_disp; const array_1d<double, 3 > & old_vel = (node_it)->FastGetSolutionStepValue(VELOCITY, 1); noalias((node_it)->FastGetSolutionStepValue(VELOCITY)) = old_vel; (node_it)->X() = (node_it)->X0() + old_disp[0]; (node_it)->Y() = (node_it)->Y0() + old_disp[1]; (node_it)->Z() = (node_it)->Z0() + old_disp[2]; } //KRATOS_WATCH("539") array_1d<double, 3 > veulerian; array_1d<double, 3 > acc_particle; array_1d<double, TDim + 1 > N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); //KRATOS_WATCH("551") #pragma omp parallel for firstprivate(results,N,veulerian,acc_particle) for (int i = 0; i < nparticles; i++) { unsigned int substep = 0; unsigned int subdivisions = 1; double small_dt = dt; while(substep++ < subdivisions) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i; (iparticle)->Set(TO_ERASE, true); Node < 3 > ::Pointer pparticle = *(iparticle.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; // KRATOS_WATCH("561") bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); // KRATOS_WATCH("564") if (is_found == 
true) { (pparticle)->GetValue(IS_VISITED) = 1; Geometry< Node < 3 > >& geom = pelement->GetGeometry(); noalias(veulerian) = N[0] * geom[0].FastGetSolutionStepValue(VELOCITY, 1); for (unsigned int k = 1; k < geom.size(); k++) noalias(veulerian) += N[k] * geom[k].FastGetSolutionStepValue(VELOCITY, 1); //compute adaptive subdivisions if(substep == 1) { //compute h double h = N[0] * geom[0].FastGetSolutionStepValue(NODAL_H); for (unsigned int k = 1; k < geom.size(); k++) h += N[k] * geom[k].FastGetSolutionStepValue(NODAL_H); //compute number of subdivisions needed const unsigned int min_subdivisions = 3; const unsigned int max_subdivisions = 20; double v = norm_2(veulerian); double subdivisions = double(floor(2*dt*v/h)); subdivisions = (subdivisions<min_subdivisions) ? min_subdivisions : (subdivisions>max_subdivisions) ? max_subdivisions : subdivisions; //compute subdivisions time step small_dt = dt / subdivisions; //KRATOS_WATCH(subdivisions) } //move according to the streamline array_1d<double, 3 > & disp = (iparticle)->FastGetSolutionStepValue(DISPLACEMENT); noalias(disp) += small_dt*veulerian; (pparticle)->Set(TO_ERASE, false); // KRATOS_WATCH("585") //compute particle velocity noalias(acc_particle) = body_force - N[0] * geom[0].FastGetSolutionStepValue(PRESS_PROJ) * density_inverse; for (unsigned int k = 1; k < geom.size(); k++) noalias(acc_particle) -= N[k] * geom[k].FastGetSolutionStepValue(PRESS_PROJ) * density_inverse; array_1d<double, 3 > & force_particle = (iparticle)->FastGetSolutionStepValue(FORCE); noalias(force_particle) = ZeroVector(3); for (unsigned int k = 0; k < geom.size(); k++) { noalias(acc_particle) += N[k] * geom[k].FastGetSolutionStepValue(FORCE) * density_inverse; force_particle += N[k] * geom[k].FastGetSolutionStepValue(FORCE) * density_inverse; } //KRATOS_WATCH("599") array_1d<double, 3 > & vel_particle = (pparticle)->FastGetSolutionStepValue(VELOCITY); if (use_eulerian_velocity == true) { if (substep == 1) { // 
noalias((*it_found)->FastGetSolutionStepValue(DISPLACEMENT)) = (*it_found)->FastGetSolutionStepValue(DISPLACEMENT,1); noalias(vel_particle) = veulerian; noalias((pparticle)->FastGetSolutionStepValue(VELOCITY, 1)) = veulerian; } } noalias(vel_particle) += small_dt*acc_particle; //update position noalias(iparticle->Coordinates()) = iparticle->GetInitialPosition(); noalias(iparticle->Coordinates()) += iparticle->FastGetSolutionStepValue(DISPLACEMENT); (iparticle)->GetValue(IS_VISITED) = 0; //KRATOS_WATCH("619") } } } //KRATOS_WATCH("622") //erase nodes whose velocity is far inconsistent with the displacement increment (typically nodes that get stuck to the wall) // for (ModelPart::NodesContainerType::iterator it = rLagrangianModelPart.NodesBegin(); // it != rLagrangianModelPart.NodesEnd(); it++) // { // if (it->Is(TO_ERASE)!= true) // { // array_1d<double,3> delta_disp = it->FastGetSolutionStepValue (DISPLACEMENT); // noalias (delta_disp) -= it->FastGetSolutionStepValue (DISPLACEMENT,1); // double norm_delta_disp = norm_2 (delta_disp); // // array_1d<double,3> avg_vel = it->FastGetSolutionStepValue(VELOCITY); // // avg_vel += it->FastGetSolutionStepValue(VELOCITY,1); // // avg_vel *= 0.5; // // double norm_v = norm_2(avg_vel); // array_1d<double,3> v_old = it->FastGetSolutionStepValue (VELOCITY,1); // double norm_v = norm_2 (v_old); // if (norm_delta_disp*3.0 < norm_v*dt ) // it->Set(TO_ERASE, true); // if (norm_delta_disp*0.333333333333333 > norm_v*dt ) // it->Set(TO_ERASE, true); // } // } //perform the erase // int nparticles_before_erase = rLagrangianModelPart.Nodes().size(); //NodeEraseProcess(rLagrangianModelPart).Execute(); // int nparticles_after_erase = rLagrangianModelPart.Nodes().size(); // std::cout << "n particles erased during streamline move =" << nparticles_after_erase - nparticles_before_erase <<std::endl; KRATOS_CATCH("") } void StreamlineCorrect(const double density, const double dt, ModelPart& rEulerianModelPart, ModelPart& 
rLagrangianModelPart,BinBasedFastPointLocator<TDim>& node_locator) { KRATOS_TRY if (rEulerianModelPart.NodesBegin()->SolutionStepsDataHas(FORCE) == false) KRATOS_THROW_ERROR(std::logic_error, "Add ----FORCE---- variable!!!!!! ERROR", ""); double density_inverse = 1.0 / density; array_1d<double, 3 > acc_particle; array_1d<double, TDim + 1 > N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); /* //reset particle position to the beginning of the step for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin(); node_it != rLagrangianModelPart.NodesEnd(); ++node_it) { Node < 3 > ::Pointer pnode = *(node_it.base()); node_it->GetValue(IS_VISITED) = 0; //reset the position to the position at the end of the step const array_1d<double, 3 > & old_disp = (node_it)->FastGetSolutionStepValue(DISPLACEMENT, 1); (node_it)->X() = (node_it)->X0() + old_disp[0]; (node_it)->Y() = (node_it)->Y0() + old_disp[1]; (node_it)->Z() = (node_it)->Z0() + old_disp[2]; }*/ #pragma omp parallel for firstprivate(results,N,acc_particle) for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i; Node < 3 > ::Pointer pparticle = *(iparticle.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { (pparticle)->GetValue(IS_VISITED) = 1; Geometry< Node < 3 > >& geom = pelement->GetGeometry(); //correct particle velocity noalias(acc_particle) = - N[0]* density_inverse * (geom[0].FastGetSolutionStepValue(PRESS_PROJ) - geom[0].FastGetSolutionStepValue(PRESS_PROJ,1) ); for (unsigned int k = 1; k < geom.size(); k++) noalias(acc_particle) -= N[k]* density_inverse * 
(geom[k].FastGetSolutionStepValue(PRESS_PROJ) - geom[k].FastGetSolutionStepValue(PRESS_PROJ,1)); array_1d<double, 3 > & vel_particle = (pparticle)->FastGetSolutionStepValue(VELOCITY); noalias(vel_particle) += dt*acc_particle; } } /* //bring back particles to their position for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin(); node_it != rLagrangianModelPart.NodesEnd(); ++node_it) { Node < 3 > ::Pointer pnode = *(node_it.base()); //reset the position to the position at the end of the step const array_1d<double, 3 > & disp = (node_it)->FastGetSolutionStepValue(DISPLACEMENT); (node_it)->X() = (node_it)->X0() + disp[0]; (node_it)->Y() = (node_it)->Y0() + disp[1]; (node_it)->Z() = (node_it)->Z0() + disp[2]; }*/ KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** //function to seed a list of new nodes void Reseed(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart) { KRATOS_TRY; unsigned int id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1; rLagrangianModelPart.Nodes().clear(); for (ModelPart::NodesContainerType::iterator node_it = rEulerianModelPart.NodesBegin(); node_it != rEulerianModelPart.NodesEnd(); node_it++) { int node_id = id++; double x = node_it->X(); double y = node_it->Y(); double z = node_it->Z(); Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, x, y, z); pnode->FastGetSolutionStepValue(VELOCITY) = node_it->FastGetSolutionStepValue(VELOCITY); } #ifdef USE_FEW_PARTICLES boost::numeric::ublas::bounded_matrix<double, TDim + 2, TDim + 1 > pos; boost::numeric::ublas::bounded_matrix<double, TDim + 2, TDim + 1 > N; #else boost::numeric::ublas::bounded_matrix<double, 16, 3 > pos; boost::numeric::ublas::bounded_matrix<double, 16, 3 > N; #endif for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); 
el_it != rEulerianModelPart.ElementsEnd(); el_it++) { Geometry<Node < 3 > >& geom = el_it->GetGeometry(); ComputeGaussPointPositions(geom, pos, N); for (unsigned int i = 0; i < pos.size1(); i++) { int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); for (unsigned int j = 0; j < TDim + 1; j++) noalias(vel) += N(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY); } } for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin(); node_it != rLagrangianModelPart.NodesEnd(); node_it++) { node_it->FastGetSolutionStepValue(VELOCITY, 1) = node_it->FastGetSolutionStepValue(VELOCITY); } KRATOS_CATCH(""); } //********************************************************************************************** //********************************************************************************************** //function to seed a list of new nodes void ReseedEmptyElements(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator, int min_number_of_particles, int max_number_of_particles) { KRATOS_TRY; int ninitial_particles = rLagrangianModelPart.Nodes().size(); //generate a tree with the position of the lagrangian nodes // typedef Node < 3 > PointType; // typedef Node < 3 > ::Pointer PointTypePointer; //unsigned int min_number_of_particles = 1; int id; if (rLagrangianModelPart.Nodes().size() != 0) id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); else id = 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { el_it->SetValue(YOUNG_MODULUS,0.0); } for (ModelPart::NodesContainerType::iterator pparticle = rLagrangianModelPart.NodesBegin(); pparticle != rLagrangianModelPart.NodesEnd(); pparticle++) { pparticle->Set(TO_ERASE,false);; } //count 
particles that fall within an element array_1d<double, TDim + 1 > N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); //count particles within an element #pragma omp parallel for firstprivate(results,N) for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i; Node < 3 > ::Pointer pparticle = *(iparticle.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { double& counter = pelement->GetValue(YOUNG_MODULUS); #pragma omp atomic counter += 1.0; } } //erase particles within elements for which reseeding is needed #pragma omp parallel for firstprivate(results,N) for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i; Node < 3 > ::Pointer pparticle = *(iparticle.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { double& counter = pelement->GetValue(YOUNG_MODULUS); if(counter < min_number_of_particles) pparticle->Set(TO_ERASE,true); else if(counter > max_number_of_particles) //delete if there are too many { #pragma omp atomic counter -= 1; pparticle->Set(TO_ERASE,true); } } } //perform the erase NodeEraseProcess(rLagrangianModelPart).Execute(); int nafter_erase_particles = rLagrangianModelPart.Nodes().size(); std::cout << "n particles erased during reseed =" << nafter_erase_particles - ninitial_particles <<std::endl; //now do reseed #ifdef USE_FEW_PARTICLES 
boost::numeric::ublas::bounded_matrix<double, TDim + 2, TDim + 1 > pos; boost::numeric::ublas::bounded_matrix<double, TDim + 2, TDim + 1 > Nnew; #else boost::numeric::ublas::bounded_matrix<double, 16, 3 > pos; boost::numeric::ublas::bounded_matrix<double, 16, 3 > Nnew; #endif //if there are less than the number of particles we decide, reseed the element for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { if (el_it->GetValue(YOUNG_MODULUS) < min_number_of_particles) { Geometry< Node<3> >& geom = el_it->GetGeometry(); ComputeGaussPointPositions(geom, pos, Nnew); for (unsigned int i = 0; i < pos.size1(); i++) { int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); for (unsigned int j = 0; j < TDim + 1; j++) noalias(vel) += Nnew(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY); array_1d<double, 3 > & vel_old = pnode->FastGetSolutionStepValue(VELOCITY, 1); noalias(vel_old) = ZeroVector(3); for (unsigned int j = 0; j < TDim + 1; j++) noalias(vel_old) += Nnew(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY, 1); } } } int nfinal_particles = rLagrangianModelPart.Nodes().size(); std::cout << "n particles added during reseed =" << nfinal_particles - ninitial_particles <<std::endl; KRATOS_CATCH(""); } //********************************************************************************************** //********************************************************************************************** void VisualizationModelPart(ModelPart& rCompleteModelPart, ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart) { KRATOS_TRY; rCompleteModelPart.Elements() = rEulerianModelPart.Elements(); rCompleteModelPart.Nodes() = rEulerianModelPart.Nodes(); unsigned int id; if(rEulerianModelPart.Nodes().size()!= 0) id = 
(rEulerianModelPart.Nodes().end() - 1)->Id() + 1; else id = 1; //preallocate the memory needed int tot_nodes = rEulerianModelPart.Nodes().size() + rLagrangianModelPart.Nodes().size(); rCompleteModelPart.Nodes().reserve( tot_nodes ); //note that here we renumber the nodes for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin(); node_it != rLagrangianModelPart.NodesEnd(); node_it++) { rCompleteModelPart.AddNode(*(node_it.base())); node_it->SetId(id++); } KRATOS_CATCH(""); } //********************************************************************************************** //********************************************************************************************** void TransferToEulerianMesh(ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart) { KRATOS_TRY //defintions for spatial search typedef Node < 3 > PointType; typedef Node < 3 > ::Pointer PointTypePointer; typedef std::vector<PointType::Pointer> PointVector; typedef std::vector<PointType::Pointer>::iterator PointIterator; typedef std::vector<double> DistanceVector; typedef std::vector<double>::iterator DistanceIterator; //creating an auxiliary list for the new nodes PointVector list_of_nodes; //************* // Bucket types typedef Bucket< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > BucketType; // typedef Bins< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > StaticBins; // typedef BinsDynamic< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > DynamicBins; //************* // DynamicBins; typedef Tree< KDTreePartition<BucketType> > tree; //Kdtree; // typedef Tree< OCTreePartition<BucketType> > tree; //Octree; // typedef Tree< StaticBins > tree; //Binstree; // typedef Tree< KDTreePartition<StaticBins> > tree; //KdtreeBins; // typedef typename KdtreeBins::Partitions SubPartitions; // typedef Tree< OCTreePartition<StaticBins> > tree; //OctreeBins; /* typedef Bins< 
TDim, PointType, stdPointVector> stdBins; typedef Tree< Bins<TDim,PointType,stdPointVector> > tree; //stdStaticBins;*/ //starting calculating time of construction of the kdtree boost::timer kdtree_construction; for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin(); node_it != rLagrangianModelPart.NodesEnd(); ++node_it) { PointTypePointer pnode = *(node_it.base()); //putting the nodes of the destination_model part in an auxiliary list list_of_nodes.push_back(pnode); } std::cout << "kdt constructin time " << kdtree_construction.elapsed() << std::endl; //create a spatial database with the list of new nodes unsigned int bucket_size = 20; tree nodes_tree(list_of_nodes.begin(), list_of_nodes.end(), bucket_size); //work arrays Node < 3 > work_point(0, 0.0, 0.0, 0.0); unsigned int MaximumNumberOfResults = 10000; PointVector Results(MaximumNumberOfResults); DistanceVector SquaredResultsDistances(MaximumNumberOfResults); if (rEulerianModelPart.NodesBegin()->SolutionStepsDataHas(NODAL_H) == false) KRATOS_THROW_ERROR(std::logic_error, "Add ----NODAL_H---- variable!!!!!! 
ERROR", ""); double sigma = 0.0; if (TDim == 2) sigma = 10.0 / (7.0 * 3.1415926); else sigma = 1.0 / 3.1415926; for (ModelPart::NodesContainerType::iterator node_it = rEulerianModelPart.NodesBegin(); node_it != rEulerianModelPart.NodesEnd(); node_it++) { work_point.X() = node_it->X(); work_point.Y() = node_it->Y(); work_point.Z() = node_it->Z(); double radius = 0.6 * node_it->FastGetSolutionStepValue(NODAL_H); //find all of the new nodes within the radius int number_of_points_in_radius; //look between the new nodes which of them is inside the radius of the circumscribed cyrcle number_of_points_in_radius = nodes_tree.SearchInRadius(work_point, radius, Results.begin(), SquaredResultsDistances.begin(), MaximumNumberOfResults); if (number_of_points_in_radius > 0) { array_1d<double, 3 > & vel = (node_it)->FastGetSolutionStepValue(VELOCITY); double& temperature = (node_it)->FastGetSolutionStepValue(TEMPERATURE); array_1d<double, 3 > original_vel = vel; double original_temperature = temperature; noalias(vel) = ZeroVector(3); temperature = 0.0; double tot_weight = 0.0; for (int k = 0; k < number_of_points_in_radius; k++) { // double weight = 1.0; double distance = sqrt(*(SquaredResultsDistances.begin() + k)); double weight = SPHCubicKernel(sigma, distance, radius); // KRATOS_WATCH(weight); // double weight = 1.0 / (sqrt(SquaredResultsDistances[k]) + 1e-9); tot_weight += weight; // tot_weight += 1.0; PointIterator it_found = Results.begin() + k; // array_1d<double,3> aux = (*it_found)->Coordinates()-node_it->Coordinates(); // KRATOS_WATCH(norm_2(aux)); // KRATOS_WATCH( *(SquaredResultsDistances.begin()+k) ); const array_1d<double, 3 > particle_velocity = (*it_found)->FastGetSolutionStepValue(VELOCITY); const double particle_temperature = (*it_found)->FastGetSolutionStepValue(TEMPERATURE); noalias(vel) += weight * particle_velocity; temperature += weight * particle_temperature; } vel /= tot_weight; temperature /= tot_weight; if (node_it->IsFixed(VELOCITY_X)) { noalias(vel) = 
original_vel; // restore the pre-transfer value on fixed-velocity nodes
            }
            if (node_it->IsFixed(TEMPERATURE)) temperature = original_temperature;
        }
        else
        {
            // NOTE(review): no particle fell inside the search radius; zeroing a
            // *fixed* velocity here looks suspicious -- confirm this fallback is intended
            if (node_it->IsFixed(VELOCITY_X)) node_it->FastGetSolutionStepValue(VELOCITY) = ZeroVector(3);
        }
    }

    KRATOS_CATCH("")
}

//**********************************************************************************************
//**********************************************************************************************
/// Transfer VELOCITY and TEMPERATURE from the Lagrangian particles to the
/// Eulerian mesh using the shape functions of the element each particle falls
/// in. Every particle scatters N[k]-weighted contributions to the nodes of its
/// host element; nodal values are then divided by the accumulated weight,
/// which is stored in the nodal YOUNG_MODULUS (used here as a scratch
/// counter). Nodes with fixed VELOCITY_X are left untouched.
void TransferToEulerianMeshShapeBased(ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator)
{
    KRATOS_TRY

    // the transfer requires FORCE and TEMPERATURE in the Eulerian nodal database
    if (rEulerianModelPart.NodesBegin()->SolutionStepsDataHas(FORCE) == false) KRATOS_THROW_ERROR(std::logic_error, "Add ----FORCE---- variable!!!!!! ERROR", "");
    if (rEulerianModelPart.NodesBegin()->SolutionStepsDataHas(TEMPERATURE) == false) KRATOS_THROW_ERROR(std::logic_error, "Add ----TEMPERATURE---- variable!!!!!! ERROR", "");

    //definitions for spatial search
    //            typedef Node < 3 > PointType;
    //            typedef Node < 3 > ::Pointer PointTypePointer;

    // reset the accumulation targets (values and weight counter) on free nodes
    for (ModelPart::NodesContainerType::iterator node_it = rEulerianModelPart.NodesBegin();
            node_it != rEulerianModelPart.NodesEnd(); node_it++)
    {
        if (node_it->IsFixed(VELOCITY_X) == false)
        {
            (node_it)->FastGetSolutionStepValue(VELOCITY) = ZeroVector(3);
            (node_it)->FastGetSolutionStepValue(TEMPERATURE) = 0.0;
            (node_it)->GetValue(YOUNG_MODULUS) = 0.0; // scratch: sum of shape-function weights
        }
    }

    array_1d<double, TDim + 1 > N; // shape functions at the particle position
    const int max_results = 10000;
    typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);

    const int nparticles = rLagrangianModelPart.Nodes().size();

    #pragma omp parallel for firstprivate(results,N)
    for (int i = 0; i < nparticles; i++)
    {
        ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i;

        Node < 3 > ::Pointer pparticle = *(iparticle.base());
        typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();

        Element::Pointer pelement;

        // locate the Eulerian element containing this particle
        bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N,
pelement, result_begin, max_results);

        if (is_found == true)
        {
            Geometry<Node<3> >& geom = pelement->GetGeometry();

            const array_1d<double, 3 > & vel_particle = (iparticle)->FastGetSolutionStepValue(VELOCITY);
            const double& temperature_particle = (iparticle)->FastGetSolutionStepValue(TEMPERATURE);

            // scatter the particle values to the nodes of the host element,
            // weighted by the shape functions N[k]; the weight itself is
            // accumulated in YOUNG_MODULUS (scratch counter)
            for (unsigned int k = 0; k < geom.size(); k++)
            {
                if (geom[k].IsFixed(VELOCITY_X) == false)
                {
                    geom[k].SetLock(); // nodes are shared between OpenMP threads
                    geom[k].FastGetSolutionStepValue(VELOCITY) += N[k] * vel_particle;
                    geom[k].FastGetSolutionStepValue(TEMPERATURE) += N[k] * temperature_particle;
                    geom[k].GetValue(YOUNG_MODULUS) += N[k];
                    geom[k].UnSetLock();
                }
            }
        }
    }

    // normalize: divide the accumulated nodal values by the total weight
    for (ModelPart::NodesContainerType::iterator node_it = rEulerianModelPart.NodesBegin();
            node_it != rEulerianModelPart.NodesEnd(); node_it++)
    {
        if (node_it->IsFixed(VELOCITY_X) == false)
        {
            const double NN = (node_it)->GetValue(YOUNG_MODULUS);
            if (NN != 0.0)
            {
                (node_it)->FastGetSolutionStepValue(VELOCITY) /= NN;
                (node_it)->FastGetSolutionStepValue(TEMPERATURE) /= NN;
            }
            else // no particle contributed to this node: report it (value stays as reset)
            {
                std::cout << node_it->Id() << " coeff = " << NN << std::endl;
            }
        }
    }

    KRATOS_CATCH("")
}

//restarting the step from the beginning
/// Roll the model part back to the state it had at the beginning of the time
/// step: solution-step data is overwritten with the previous step's values and
/// the nodal coordinates are recomputed from the old DISPLACEMENT.
void RestartStep(ModelPart & rModelPart)
{
    KRATOS_TRY;

    //setting the variables to their value at the beginning of the time step
    rModelPart.OverwriteSolutionStepData(1, 0);

    //setting the coordinates to their value at the beginning of the step
    for (ModelPart::NodesContainerType::iterator node_it = rModelPart.NodesBegin();
            node_it != rModelPart.NodesEnd(); node_it++)
    {
        array_1d<double, 3 > & coords = node_it->Coordinates();
        const array_1d<double, 3 > & old_disp = node_it->FastGetSolutionStepValue(DISPLACEMENT, 1);

        coords[0] = node_it->X0() + old_disp[0];
        coords[1] = node_it->Y0() + old_disp[1];
        coords[2] = node_it->Z0() + old_disp[2];
    }

    KRATOS_CATCH("");
}

private:

/// Cubic spline (SPH) kernel with support radius hmax; sigma is the
/// dimension-dependent normalization factor supplied by the caller.
inline double SPHCubicKernel(const double sigma, const double r, const double hmax)
{
    double h_half = 0.5 * hmax;
    const double s = r / h_half;
    const double coeff = sigma / pow(h_half,
static_cast<int>(TDim));

    // standard cubic spline: support ends at s = 2 (i.e. r = hmax)
    if (s <= 1.0)
        return coeff * (1.0 - 1.5 * s * s + 0.75 * s * s * s);
    else if (s <= 2.0)
        return 0.25 * coeff * pow(2.0 - s, 3);
    else
        return 0.0;
}

/// 2D overload (triangle): center = centroid, R = 1.01 * distance from the
/// centroid to the farthest vertex. zc is set to 0; N is not written here.
inline void CalculateCenterAndSearchRadius(Geometry<Node < 3 > >&geom,
        double& xc, double& yc, double& zc, double& R,
        array_1d<double, 3 > & N
                                          )
{
    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();

    xc = 0.3333333333333333333 * (x0 + x1 + x2);
    yc = 0.3333333333333333333 * (y0 + y1 + y2);
    zc = 0.0;

    // squared distances centroid -> vertices; keep the maximum
    double R1 = (xc - x0)*(xc - x0) + (yc - y0)*(yc - y0);
    double R2 = (xc - x1)*(xc - x1) + (yc - y1)*(yc - y1);
    double R3 = (xc - x2)*(xc - x2) + (yc - y2)*(yc - y2);

    R = R1;
    if (R2 > R) R = R2;
    if (R3 > R) R = R3;

    R = 1.01 * sqrt(R); // 1% safety margin on the search radius
}
//***************************************
//***************************************
/// 3D overload (tetrahedron): center = centroid, R = distance from the
/// centroid to the farthest vertex. NOTE(review): unlike the 2D overload no
/// 1.01 safety factor is applied here -- confirm whether that is intentional.
inline void CalculateCenterAndSearchRadius(Geometry<Node < 3 > >&geom,
        double& xc, double& yc, double& zc, double& R,
        array_1d<double, 4 > & N
                                          )
{
    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double z0 = geom[0].Z();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double z1 = geom[1].Z();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();
    double z2 = geom[2].Z();
    double x3 = geom[3].X();
    double y3 = geom[3].Y();
    double z3 = geom[3].Z();

    xc = 0.25 * (x0 + x1 + x2 + x3);
    yc = 0.25 * (y0 + y1 + y2 + y3);
    zc = 0.25 * (z0 + z1 + z2 + z3);

    // squared distances centroid -> vertices; keep the maximum
    double R1 = (xc - x0)*(xc - x0) + (yc - y0)*(yc - y0) + (zc - z0)*(zc - z0);
    double R2 = (xc - x1)*(xc - x1) + (yc - y1)*(yc - y1) + (zc - z1)*(zc - z1);
    double R3 = (xc - x2)*(xc - x2) + (yc - y2)*(yc - y2) + (zc - z2)*(zc - z2);
    double R4 = (xc - x3)*(xc - x3) + (yc - y3)*(yc - y3) + (zc - z3)*(zc - z3);

    R = R1;
    if (R2 > R) R = R2;
    if (R3 > R) R = R3;
    if (R4 > R) R = R4;

    R = sqrt(R);
}
//***************************************
//***************************************
/// 2D point-in-triangle test: fills N with the barycentric (area) coordinates
/// of (xc, yc) and returns true iff all of them lie in [0, 1].
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
        const double xc, const double yc,
const double zc,
        array_1d<double, 3 > & N
                             )
{
    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();

    double area = CalculateVol(x0, y0, x1, y1, x2, y2);
    double inv_area = 0.0;
    if (area == 0.0)
    {
        // degenerate element: barycentric coordinates are undefined
        KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", "");
    }
    else
    {
        inv_area = 1.0 / area;
    }

    // area (barycentric) coordinates of (xc, yc) w.r.t. the triangle:
    // N[k] = area of sub-triangle opposite node k / total area
    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;

    if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0)
        //if the xc yc is inside the triangle return true
        return true;

    return false;
}
//***************************************
//***************************************
/// 3D point-in-tetrahedron test: fills N with the volume (barycentric)
/// coordinates of (xc, yc, zc) and returns true iff all of them lie in [0, 1].
/// Throws if the element volume is below the tolerance.
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
        const double xc, const double yc, const double zc,
        array_1d<double, 4 > & N
                             )
{
    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double z0 = geom[0].Z();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double z1 = geom[1].Z();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();
    double z2 = geom[2].Z();
    double x3 = geom[3].X();
    double y3 = geom[3].Y();
    double z3 = geom[3].Z();

    double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);

    double inv_vol = 0.0;
    if (vol < 0.0000000000001)
    {
        // NOTE(review): this tolerance also rejects inverted (negative-volume)
        // elements, not only degenerate ones -- confirm that is the intent
        KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", "");
    }
    else
    {
        inv_vol = 1.0 / vol;
    }

    // volume (barycentric) coordinates of (xc, yc, zc) w.r.t. the tetrahedron
    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;

    if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
            N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0)
        //if the xc yc zc is inside the tetrahedron return true
        return true;
return false;
}

/// Signed area of the triangle (x0,y0)-(x1,y1)-(x2,y2); positive for
/// counter-clockwise node ordering.
inline double CalculateVol(const double x0, const double y0,
        const double x1, const double y1,
        const double x2, const double y2
                          )
{
    return 0.5 * ((x1 - x0)*(y2 - y0)- (y1 - y0)*(x2 - x0));
}
//***************************************
//***************************************
/// Signed volume of the tetrahedron with the four given vertices
/// (determinant of the edge vectors, divided by 6).
inline double CalculateVol(const double x0, const double y0, const double z0,
        const double x1, const double y1, const double z1,
        const double x2, const double y2, const double z2,
        const double x3, const double y3, const double z3
                          )
{
    double x10 = x1 - x0;
    double y10 = y1 - y0;
    double z10 = z1 - z0;

    double x20 = x2 - x0;
    double y20 = y2 - y0;
    double z20 = z2 - z0;

    double x30 = x3 - x0;
    double y30 = y3 - y0;
    double z30 = z3 - z0;

    double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30;
    return detJ * 0.1666666666666666666667; // detJ / 6
}

/// Four-point seeding rule for a triangle: fills pos with the global
/// coordinates of the points and N with the corresponding linear shape
/// function values (rows = points, columns = nodes).
void ComputeGaussPointPositions(Geometry< Node < 3 > >& geom,
        boost::numeric::ublas::bounded_matrix<double, 4, 3 > & pos,
        boost::numeric::ublas::bounded_matrix<double, 4, 3 > & N)
{
    double one_third = 1.0 / 3.0;
    double one_sixt = 1.0 / 6.0;
    double two_third = 2.0 * one_third;

    // shape function values at the four points (three edge-biased + centroid)
    N(0, 0) = one_sixt;
    N(0, 1) = one_sixt;
    N(0, 2) = two_third;
    N(1, 0) = two_third;
    N(1, 1) = one_sixt;
    N(1, 2) = one_sixt;
    N(2, 0) = one_sixt;
    N(2, 1) = two_third;
    N(2, 2) = one_sixt;
    N(3, 0) = one_third;
    N(3, 1) = one_third;
    N(3, 2) = one_third;

    //first
    pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X();
    pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y();
    pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z();

    //second
    pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X();
    pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y();
    pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z();

    //third
    pos(2, 0) = one_sixt * geom[0].X() +
two_third * geom[1].X() + one_sixt * geom[2].X(); pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y(); pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z(); //fourth pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X(); pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y(); pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z(); } void ComputeGaussPointPositions(Geometry< Node < 3 > >& geom, boost::numeric::ublas::bounded_matrix<double, 16, 3 > & pos, boost::numeric::ublas::bounded_matrix<double, 16, 3 > & N) { //lower diagonal terms double ypos = 1.0 / 12.0; int pos_counter = 0; for (unsigned int i = 0; i < 4; i++) { double xpos = 1.0 / 12.0; for (unsigned int j = 0; j < 4 - i; j++) { double N1 = xpos; double N2 = ypos; double N3 = 1.0 - xpos - ypos; pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X(); pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y(); pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z(); N(pos_counter, 0) = N1; N(pos_counter, 1) = N2; N(pos_counter, 2) = N3; xpos += 1.0 / 4.0; pos_counter += 1; } ypos += 1.0 / 4.0; } //lower diagonal terms ypos = 2.0 / 12.0; // pos_counter = 8; for (unsigned int i = 0; i < 3; i++) { double xpos = 2.0 / 12.0; for (unsigned int j = 0; j < 4 - i; j++) { double N1 = xpos; double N2 = ypos; double N3 = 1.0 - xpos - ypos; pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X(); pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y(); pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z(); N(pos_counter, 0) = N1; N(pos_counter, 1) = N2; N(pos_counter, 2) = N3; xpos += 1.0 / 4.0; pos_counter += 1; } ypos += 1.0 / 4.0; } } void ConsistentMassMatrix(const double A, boost::numeric::ublas::bounded_matrix<double, 3, 3 > & 
M)
{
    // c1 = A/12 (off-diagonal entries), c2 = A/6 (diagonal entries)
    double c1 = A / 12.0;
    double c2 = 2.0 * c1;

    M(0, 0) = c2;
    M(0, 1) = c1;
    M(0, 2) = c1;
    M(1, 0) = c1;
    M(1, 1) = c2;
    M(1, 2) = c1;
    M(2, 0) = c1;
    M(2, 1) = c1;
    M(2, 2) = c2;
}

};

} // namespace Kratos.

#endif // KRATOS_LAGRANGIAN_PARTICLES_UTILITIES_INCLUDED defined
/* ======================= evolve.h ======================= */
/* ----- scalar type configuration ----------------------------------------- */
#define FLOAT double
#define CLFLOAT cl_double
#define CLFLOAT4 cl_double4
#define DOUBLE long double
#define INT int
#define UINT unsigned int
#define LONG long
#define ULONG unsigned long

/* ----- integrator configuration ------------------------------------------ */
#define RVTIMESTEP
#define RATIMESTEP
#define RARVRATIO 1.
#define MPWORKLIMIT 1000
#define CLWORKLIMIT 40000
#define MAXLEVEL 64

/* compensated summation of particle positions (velocities optional);
   see the COMPSUMP/COMPSUMV macros at the bottom of this header */
#define COMPENSATED_SUMMP
//~ #define COMPENSATED_SUMMV
//~ #define CONSISTENCY_CHECKS // perform (time-consuming, but thorough) sanity checks

/* state of one integration particle */
struct particle
{
  UINT id;
  FLOAT mass;
  FLOAT radius; /*only used for stopping conditions*/
  DOUBLE pos[3];
  DOUBLE vel[3];
#ifdef COMPENSATED_SUMMP
  DOUBLE pos_e[3]; /* running error term for pos, maintained by COMPSUMP */
#endif
#ifdef COMPENSATED_SUMMV
  DOUBLE vel_e[3]; /* running error term for vel, maintained by COMPSUMV */
#endif
  DOUBLE pot;
  DOUBLE postime;
  FLOAT timestep;
};

/* reduced-precision particle (interaction source) */
struct jparticle
{
  FLOAT mass;
  FLOAT pos[3];
  FLOAT vel[3];
};

/* a particle system, split into massive and zero-mass partitions */
struct sys
{
  UINT n, nzero; // n=total particles, nzero=# zero mass particles
  struct particle *part; // start of particles, NULL iff n==0
  struct particle *zeropart; // start of zero mass particles nzero>0, otherwise NULL
};

/* i-th particle of s, transparently crossing into the zero-mass partition */
#define GETPART(s, i) ((i)<(s).n-(s).nzero ? (s).part+(i) : (s).zeropart+((i)-((s).n-(s).nzero)))
/* last massive particle of s, or NULL if there is none */
#define LAST(s) ((s).part==NULL || (s).n-(s).nzero==0 ? NULL : (s).part+((s).n-(s).nzero)-1)
/* last zero-mass particle of s, or NULL if there is none */
#define LASTZERO(s) ((s).zeropart==NULL || (s).nzero==0 ? NULL : (s).zeropart+(s).nzero-1)

extern struct sys debugsys; // for monitoring purposes

/* selectable integrators */
enum intopt
{
  CONSTANT, // 0
  SHARED2, // 1
  PASS, // 2
  HOLD, // 3
  BRIDGE, // 4
  NAIVE, // 5
  VARIABLE, // 6
  PASS_DKD, // 7
  HOLD_DKD, // 8
  PPASS_DKD, // 9
  BRIDGE_DKD, // 10
  CC, // 11
  CC_KEPLER, // 12
  OK, // 13
  KEPLER, // 14
  SHARED4, // 15
  FOURTH_M4, // 16
  FOURTH_M5, // 17
  SHARED6, // 18
  SHARED8, // 19
  SHARED10, // 20
  SHAREDBS, // 21
  CCC, // 22
  CCC_KEPLER, // 23
  CC_BS, // 24
  CCC_BS, // 25
  BS_CC_KEPLER, // 26
  CC_BSA, // 27
  CCC_BSA, // 28
  SHARED2_COLLISIONS, // 29
  SHARED4_COLLISIONS, // 30
  SHARED6_COLLISIONS, // 31
  SHARED8_COLLISIONS, // 32
  SHARED10_COLLISIONS, // 33
  CONSTANT2, // 34
  CONSTANT4, // 35
  CONSTANT6, // 36
  CONSTANT8, // 37
  CONSTANT10, // 38
  ERROR_CONTROL, // 39
  CC_SHARED10, // 40
  CCC_SHARED10 // 41
};

/* global integrator parameters */
extern int verbosity;
extern FLOAT eps2;
extern FLOAT dt_param;
#pragma omp threadprivate(dt_param)
extern int accel_zero_mass;
extern struct sys zerosys;
extern int fixed_j;
extern DOUBLE bs_target_error;
extern int opencl_device_type;

/* diagnostics */
struct diagnostics
{
  DOUBLE simtime;
  DOUBLE timetrack;
  unsigned long deepsteps;
  unsigned long tcount[MAXLEVEL],kcount[MAXLEVEL],dcount[MAXLEVEL];
  unsigned long tstep[MAXLEVEL],kstep[MAXLEVEL],dstep[MAXLEVEL];
  unsigned long cefail[MAXLEVEL],cecount[MAXLEVEL]; // call/fail counts of the Kepler solver
  unsigned long bsstep[MAXLEVEL],jcount[MAXLEVEL]; /* count + jcount of BS evolve */
#ifdef EVOLVE_OPENCL
  unsigned long cpu_step,cl_step,cpu_count,cl_count;
#endif
  int ntasks[MAXLEVEL],taskcount[MAXLEVEL];
  unsigned long taskdrift,taskkick;
};

extern struct diagnostics global_diag;
extern struct diagnostics *diag;
#pragma omp threadprivate(diag)

/* top-level driver interface */
void init_code();
void stop_code();
void init_evolve(struct sys s, int inttype);
void do_evolve(struct sys s, double dt, int inttype);
void system_center_of_mass(struct sys s, DOUBLE *cmpos, DOUBLE *cmvel);
void move_system(struct sys s, DOUBLE dpos[3],DOUBLE dvel[3],int dir);
DOUBLE system_potential_energy(struct sys s);
DOUBLE system_kinetic_energy(struct sys s);

/* integrator building blocks */
void drift(int clevel,struct sys s, DOUBLE etime, DOUBLE dt); /* drift sys */
void kick(int clevel,struct sys s1, struct sys s2, DOUBLE dt); /* =kick sys1 for interactions with sys2 */
void kdk(int clevel,struct sys s1,struct sys s2, DOUBLE stime, DOUBLE etime, DOUBLE dt);
void dkd(int clevel,struct sys s1,struct sys s2, DOUBLE stime, DOUBLE etime, DOUBLE dt);
void timestep(int clevel,struct sys s1, struct sys s2,int dir);
FLOAT timestep_ij(struct particle *i, struct particle *j,int dir);
FLOAT global_timestep(struct sys s);
FLOAT max_global_timestep(struct sys s);
void potential(struct sys s1, struct sys s2);
struct sys join(struct sys s1,struct sys s2);
void split_zeromass(struct sys *s);
void verify_split_zeromass(struct sys s);

/* small utility macros */
#define SWAP(a,b,c) {c t;t=(a);(a)=(b);(b)=t;}
#define ABS(X) (((X) >= 0) ? (X) : -(X))
#define SIGN(X) ((X>0)-(X<0))

/* logging / fatal-error helpers */
#define LOG(fmt, ...) {\
  printf("%s:%d\t", __FILE__, __LINE__);\
  printf(fmt, ## __VA_ARGS__);\
}

#define ENDRUN(fmt, ...) { \
  printf("ENDRUN at %s:%d ", __FILE__, __LINE__);\
  printf(fmt, ## __VA_ARGS__);\
  fflush(stdout);\
  exit(-1);\
}

/* compensated summation: sum += delta while carrying a running error term
   (reduces round-off accumulation; falls back to a plain += when disabled) */
#ifdef COMPENSATED_SUMMP
#define COMPSUMP(sum,err,delta) \
  { \
    DOUBLE a; \
    a=sum; \
    err=err+delta; \
    sum=a+err; \
    err=err+(a-sum); \
  }
#else
#define COMPSUMP(sum,err,delta) {sum+=delta;}
#endif

#ifdef COMPENSATED_SUMMV
#define COMPSUMV(sum,err,delta) \
  { \
    DOUBLE a; \
    a=sum; \
    err=err+delta; \
    sum=a+err; \
    err=err+(a-sum); \
  }
#else
#define COMPSUMV(sum,err,delta) {sum+=delta;}
#endif

#define COMPSUM(sum,err,delta) \
  { \
    DOUBLE a; \
    a=sum; \
    err=err+delta; \
    sum=a+err; \
    err=err+(a-sum); \
  }

/* classic Kahan update variant */
#define COMPSUM1(sum,err,delta) \
  { \
    DOUBLE t,y; \
    y=(delta)-err; \
    t=sum+y; \
    err=(t-sum)-y; \
    sum=t; \
  }

/* abort the run when a time step underflows the representable resolution */
#define CHECK_TIMESTEP(etime,stime,dt,clevel) \
  if(sizeof(dt)==sizeof(long double)) { \
    if(etime == stime || dt==0 || clevel>=MAXLEVEL) \
      ENDRUN("timestep too small: etime=%Le stime=%Le dt=%Le clevel=%u\n", etime, stime, dt, clevel); \
  } else { \
    if(etime == stime || dt==0 || clevel>=MAXLEVEL) \
      ENDRUN("timestep too small: etime=%le stime=%le dt=%le clevel=%u\n", (double) etime, (double) stime, (double) dt, clevel); \
  }
ordered_doacross_codegen.c
// RUN: %clang_cc1 -verify -fopenmp -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s

// RUN: %clang_cc1 -verify -fopenmp-simd -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics

// Codegen test: '#pragma omp for ordered(1)' with 'depend(source)' and
// 'depend(sink)' must lower to the __kmpc_doacross_* runtime entry points.

#ifndef HEADER
#define HEADER

// CHECK: [[KMP_DIM:%.+]] = type { i64, i64, i64 }
extern int n;
int a[10], b[10], c[10], d[10];
void foo();

// CHECK-LABEL: @main()
int main() {
  int i;
// The loop bounds are packed into a kmp_dim struct that is handed to
// __kmpc_doacross_init before the statically scheduled loop begins.
// CHECK: [[DIMS:%.+]] = alloca [1 x [[KMP_DIM]]],
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT:%.+]])
// CHECK: icmp
// CHECK-NEXT: br i1 %
// CHECK: [[CAST:%.+]] = bitcast [1 x [[KMP_DIM]]]* [[DIMS]] to i8*
// CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 [[CAST]], i8 0, i64 24, i1 false)
// CHECK: [[DIM:%.+]] = getelementptr inbounds [1 x [[KMP_DIM]]], [1 x [[KMP_DIM]]]* [[DIMS]], i64 0, i64 0
// CHECK: getelementptr inbounds [[KMP_DIM]], [[KMP_DIM]]* [[DIM]], i32 0, i32 1
// CHECK: store i64 %{{.+}}, i64* %
// CHECK: getelementptr inbounds [[KMP_DIM]], [[KMP_DIM]]* [[DIM]], i32 0, i32 2
// CHECK: store i64 1, i64* %
// CHECK: [[DIM:%.+]] = getelementptr inbounds [1 x [[KMP_DIM]]], [1 x [[KMP_DIM]]]* [[DIMS]], i64 0, i64 0
// CHECK: [[CAST:%.+]] = bitcast [[KMP_DIM]]* [[DIM]] to i8*
// CHECK: call void @__kmpc_doacross_init([[IDENT]], i32 [[GTID]], i32 1, i8* [[CAST]])
// CHECK: call void @__kmpc_for_static_init_4(
#pragma omp for ordered(1)
  for (i = 0; i < n; ++i) {
    a[i] = b[i] + 1;
    foo();
// 'depend(source)' posts the current iteration via __kmpc_doacross_post.
// CHECK: call void [[FOO:.+]](
// CHECK: load i32, i32* [[I:%.+]],
// CHECK-NEXT: sub nsw i32 %{{.+}}, 0
// CHECK-NEXT: sdiv i32 %{{.+}}, 1
// CHECK-NEXT: sext i32 %{{.+}} to i64
// CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [1 x i64], [1 x i64]* [[CNT:%.+]], i64 0, i64 0
// CHECK-NEXT: store i64 %{{.+}}, i64* [[TMP]],
// CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [1 x i64], [1 x i64]* [[CNT]], i64 0, i64 0
// CHECK-NEXT: call void @__kmpc_doacross_post([[IDENT]], i32 [[GTID]], i64* [[TMP]])
#pragma omp ordered depend(source)
    c[i] = c[i] + 1;
    foo();
// 'depend(sink : i - 2)' waits on iteration i-2 via __kmpc_doacross_wait.
// CHECK: call void [[FOO]]
// CHECK: load i32, i32* [[I]],
// CHECK-NEXT: sub nsw i32 %{{.+}}, 2
// CHECK-NEXT: sub nsw i32 %{{.+}}, 0
// CHECK-NEXT: sdiv i32 %{{.+}}, 1
// CHECK-NEXT: sext i32 %{{.+}} to i64
// CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [1 x i64], [1 x i64]* [[CNT:%.+]], i64 0, i64 0
// CHECK-NEXT: store i64 %{{.+}}, i64* [[TMP]],
// CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [1 x i64], [1 x i64]* [[CNT]], i64 0, i64 0
// CHECK-NEXT: call void @__kmpc_doacross_wait([[IDENT]], i32 [[GTID]], i64* [[TMP]])
#pragma omp ordered depend(sink : i - 2)
    d[i] = a[i - 2];
  }
// CHECK: call void @__kmpc_for_static_fini(
// CHECK: call void @__kmpc_doacross_fini([[IDENT]], i32 [[GTID]])
// CHECK: ret i32 0
  return 0;
}
#endif // HEADER
AlloyVolume.h
/* * Copyright(C) 2015, Blake C. Lucas, Ph.D. (img.science@gmail.com) * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ #ifndef ALLOYIMAGE3D_H_INCLUDE_GUARD #define ALLOYIMAGE3D_H_INCLUDE_GUARD #include "AlloyCommon.h" #include "AlloyMath.h" #include "sha2.h" #include "AlloyFileUtil.h" #include "AlloyImage.h" #include "cereal/types/vector.hpp" #include <vector> #include <functional> #include <fstream> #include <random> namespace aly { template<class T, int C, ImageType I> struct Volume { private: std::string hashCode; int x, y, z; public: std::vector<vec<T, C>> data; typedef vec<T, C> ValueType; typedef typename std::vector<ValueType>::iterator iterator; typedef typename std::vector<ValueType>::const_iterator const_iterator; typedef typename std::vector<ValueType>::reverse_iterator reverse_iterator; iterator begin() { return data.begin(); } iterator end() { return data.end(); } const_iterator cbegin() const { return data.cbegin(); } const_iterator cend() const { return data.cend(); } reverse_iterator rbegin() { return data.rbegin(); } reverse_iterator rend() { return data.rend(); } reverse_iterator rbegin() const { return data.rbegin(); } reverse_iterator rend() const { return data.rend(); } int rows; int cols; int slices; uint64_t id; const int channels = C; const ImageType type = I; int3 position() const { return int3(x, y, z); } void setPosition(const int3& pos) { x = pos.x; y = pos.y; z = pos.z; } std::string updateHashCode(size_t MAX_SAMPLES = 0, HashMethod method = HashMethod::SHA256); std::string getHashCode() { return hashCode; } template<class Archive> void serialize(Archive & archive) { archive(cereal::make_nvp(MakeString() << type << channels, id), CEREAL_NVP(rows), CEREAL_NVP(cols), CEREAL_NVP(slices), CEREAL_NVP(x), CEREAL_NVP(y), CEREAL_NVP(z), CEREAL_NVP(hashCode)); } void writeToXML(const std::string& fileName) const { WriteImageToRawFile(fileName, *this); } void set(const T& val) { data.assign(data.size(), vec<T, C>(val)); } void set(const vec<T, C>& val) { data.assign(data.size(), val); } void set(const std::vector<vec<T, C>>& val) { data = val; } void set(T* 
val) { if (val == nullptr) return; size_t offset = 0; for (vec<T, C>& x : data) { for (int c = 0; c < C; c++) { x[c] = val[offset++]; } } } void set(vec<T, C>* val) { if (val == nullptr) return; size_t offset = 0; for (vec<T, C>& x : data) { x = val[offset++]; } } void set(const Volume<T, C, I>& other) { resize(other.rows, other.cols, other.slices); id = other.id; x = other.x; y = other.y; z = other.z; set(&other.data[0]); } std::string getTypeName() const { return MakeString() << type << channels; } Volume(int r, int c, int s, int x = 0, int y = 0, int z = 0, uint64_t id = 0) : x(x), y(y), z(z), data(r * c * s) ,rows(r), cols(c), slices(s), id(id){ } Volume(int r, int c, int s, int3 pos, uint64_t id = 0) : x(pos.x), y(pos.y), z(pos.z), data(r * c * s) , rows(r), cols(c), slices(s), id(id){ } Volume(T* ptr, int r, int c, int s, int x = 0, int y = 0, int z = 0, uint64_t id = 0) : Volume(r, c, s, x, y, z, id) { set(ptr); } Volume(vec<T, C>* ptr, int r, int c, int s, int x = 0, int y = 0, int z = 0, uint64_t id = 0) : Volume(r, c, s, x, y, z, id) { set(ptr); } Volume(std::vector<vec<T, C>>& ref, int r, int c, int s, int x = 0, int y = 0, int z = 0, uint64_t id = 0) : x(x), y(y), z(z), data(ref) , rows(r), cols(c), slices(s), id(id){ } Volume() : x(0), y(0), z(0), rows(0), cols(0), slices(0), id(0) { } Volume(const Volume<T, C, I>& img) : Volume(img.rows, img.cols, img.slices, img.position(), img.id) { set(img.data); } Volume<T, C, I>& operator=(const Volume<T, C, I>& rhs) { if (this == &rhs) return *this; this->resize(rhs.rows, rhs.cols, rhs.slices); this->setPosition(rhs.position()); this->id = rhs.id; this->set(rhs.data); return *this; } int3 dimensions() const { return int3(rows, cols, slices); } size_t size() const { return data.size(); } size_t typeSize() const { return sizeof(vec<T, C>); } void resize(int r, int c, int s) { data.resize(r * c * s); data.shrink_to_fit(); rows = r; cols = c; slices = s; } inline void clear() { data.clear(); data.shrink_to_fit(); 
rows = 0; cols = 0; slices = 0; } vec<T, C>* vecPtr() { if (data.size() == 0) return nullptr; return data.data(); } const vec<T, C>* vecPtr() const { if (data.size() == 0) return nullptr; return data.data(); } T* ptr() { if (data.size() == 0) return nullptr; return &(data.front()[0]); } const T* ptr() const { if (data.size() == 0) return nullptr; return &(data.front()[0]); } void setZero() { data.assign(data.size(), vec<T, C>((T)0)); } const vec<T, C>& operator[](const size_t i) const { return data[i]; } vec<T, C>& operator[](const size_t i) { return data[i]; } vec<T, C>& operator()(const int i, const int j, const int k) { return data[clamp(i, 0, rows - 1) + clamp(j, 0, cols - 1) * rows + clamp(k, 0, slices - 1) * rows * cols]; } vec<T, C>& operator()(const size_t i, const size_t j, const size_t k) { return data[clamp((int)i, 0, rows - 1) + clamp((int)j, 0, cols - 1) * rows + clamp((int)k, 0, slices - 1) * rows * cols]; } vec<T, C>& operator()(const int3 ijk) { return data[clamp(ijk.x, 0, rows - 1) + clamp(ijk.y, 0, cols - 1) * rows + clamp(ijk.z, 0, slices - 1) * rows * cols]; } const vec<T, C>& operator()(const int i, const int j, const int k) const { return data[clamp(i, 0, rows - 1) + clamp(j, 0, cols - 1) * rows + clamp(k, 0, slices - 1) * rows * cols]; } const vec<T, C>& operator()(const size_t i, const size_t j, const size_t k) const { return data[clamp((int)i, 0, rows - 1) + clamp((int)j, 0, cols - 1) * rows + clamp((int)k, 0, slices - 1) * rows * cols]; } const vec<T, C>& operator()(const int3 ijk) const { return data[clamp(ijk.x, 0, rows - 1) + clamp(ijk.y, 0, cols - 1) * rows + clamp(ijk.z, 0, slices - 1) * rows * cols]; } template<class K> vec<K, C> operator()(const K x, const K y, const K z) { int i = static_cast<int>(std::floor(x)); int j = static_cast<int>(std::floor(y)); int k = static_cast<int>(std::floor(z)); vec<K, C> rgb000 = ConvertType<K, T, C>(operator()(i, j, k)); vec<K, C> rgb100 = ConvertType<K, T, C>(operator()(i + 1, j, k)); vec<K, C> 
rgb110 = ConvertType<K, T, C>(operator()(i + 1, j + 1, k)); vec<K, C> rgb010 = ConvertType<K, T, C>(operator()(i, j + 1, k)); vec<K, C> rgb001 = ConvertType<K, T, C>(operator()(i, j, k + 1)); vec<K, C> rgb101 = ConvertType<K, T, C>(operator()(i + 1, j, k + 1)); vec<K, C> rgb111 = ConvertType<K, T, C>( operator()(i + 1, j + 1, k + 1)); vec<K, C> rgb011 = ConvertType<K, T, C>(operator()(i, j + 1, k + 1)); K dx = x - i; K dy = y - j; K dz = z - k; vec<K, C> lower = ((rgb000 * (K(1) - dx) + rgb100 * dx) * (K(1) - dy) + (rgb010 * (K(1) - dx) + rgb110 * dx) * dy); vec<K, C> upper = ((rgb001 * (K(1) - dx) + rgb101 * dx) * (K(1) - dy) + (rgb011 * (K(1) - dx) + rgb111 * dx) * dy); return (K(1) - dz) * lower + dz * upper; } template<class K> inline vec<K, C> operator()(const vec<K, 3>& pt) { return operator()(pt.x, pt.y, pt.z); } template<class F> void apply(F f) { size_t sz = size(); #pragma omp parallel for for (int offset = 0; offset < (int)sz; offset++) { f(offset, data[offset]); } } void downSample(Volume<T, C, I>& out) const { static const double Kernel[3][3][3] = { { { 0, 1, 0 }, { 1, 4, 1 }, { 0, 1, 0 } }, { { 1, 4, 1 }, { 4, 12, 4 }, { 1, 4, 1 } }, { { 0, 1, 0 }, { 1, 4, 1 }, { 0, 1, 0 } } }; out.resize(rows / 2, cols / 2, slices / 2); #pragma omp parallel for for (int i = 0; i < out.rows; i++) { for (int j = 0; j < out.cols; j++) { for (int k = 0; k < out.slices; k++) { vec<double, C> vsum(0.0); for (int ii = 0; ii < 3; ii++) { for (int jj = 0; jj < 3; jj++) { for (int kk = 0; kk < 3; kk++) { vsum += Kernel[ii][jj][kk] * vec<double, C>( operator()(2 * i + ii - 1, 2 * j + jj - 1, 2 * k + kk - 1)); } } } out(i, j, k) = vec<T, C>(vsum / 48.0); } } } } void upSample(Volume<T, C, I>& out) const { static const double Kernel[3][3][3] = { { { 0, 1, 0 }, { 1, 4, 1 }, { 0, 1, 0 } }, { { 1, 4, 1 }, { 4, 12, 4 }, { 1, 4, 1 } }, { { 0, 1, 0 }, { 1, 4, 1 }, { 0, 1, 0 } } }; if (out.size() == 0) out.resize(rows * 2, cols * 2, slices * 2); #pragma omp parallel for for (int i = 0; 
i < out.rows; i++) { for (int j = 0; j < out.cols; j++) { for (int k = 0; k < out.slices; k++) { vec<double, C> vsum(0.0); for (int ii = 0; ii < 3; ii++) { for (int jj = 0; jj < 3; jj++) { for (int kk = 0; kk < 3; kk++) { int iii = i + ii - 1; int jjj = j + jj - 1; int kkk = k + kk - 1; if (iii % 2 == 0 && jjj % 2 == 0 && kkk % 2 == 0) { vsum += Kernel[ii][jj][kk] * vec<double, C>( operator()(iii / 2, jjj / 2, kkk / 2)); } } } out(i, j, k) = vec<T, C>(vsum / 6.0); } } } } } Volume<T, C, I> downSample() const { Volume<T, C, I> out; downSample(out); return out; } Volume<T, C, I> upSample() const { Volume<T, C, I> out; upSample(out); return out; } vec<T, C> min() const { vec<T, C> minVal(std::numeric_limits<T>::max()); for (vec<T, C>& val : data) { minVal = aly::minVec(val, minVal); } return minVal; } vec<T, C> max() const { vec<T, C> maxVal(std::numeric_limits<T>::min()); for (vec<T, C>& val : data) { maxVal = aly::maxVec(val, maxVal); } return maxVal; } std::pair<vec<T, C>, vec<T, C>> range() const { vec<T, C> maxVal(std::numeric_limits<T>::min()); vec<T, C> minVal(std::numeric_limits<T>::max()); for (vec<T, C>& val : data) { maxVal = aly::maxVec(val, maxVal); minVal = aly::minVec(val, minVal); } return std::pair<vec<T, C>, vec<T, C>>(minVal, maxVal); } vec<T, C> mean() const { vec<double, C> mean(0.0); for (vec<T, C>& val : data) { mean += vec<double, C>(val); } mean = mean / (double)data.size(); return vec<T, C>(mean); } vec<T, C> median() const { std::vector<T> bands[C]; for (int c = 0; c < C; c++) { bands[c].resize(data.size()); } size_t index = 0; for (vec<T, C>& val : data) { for (int c = 0; c < C; c++) { bands[c][index] = val[c]; } index++; } #pragma omp parallel for for (int c = 0; c < C; c++) { std::sort(bands[c].begin(), bands[c].end()); } vec<T, C> med; if (data.size() % 2 == 0) { for (int c = 0; c < C; c++) { med[c] = T( ((double)bands[c][data.size() / 2] + (double)bands[c][data.size() / 2 - 1]) * 0.5f); } } else { for (int c = 0; c < C; c++) { med[c] = 
bands[c][data.size() / 2]; } } return med; } vec<T, C> mad() const { if (data.size() <= 2) return vec<T, C>(T(0)); vec<T, C> med = median(); std::vector<T> bands[C]; for (int c = 0; c < C; c++) { bands[c].resize(data.size()); } size_t index = 0; for (vec<T, C>& val : data) { vec<T, C> e = aly::abs(val - med); for (int c = 0; c < C; c++) { bands[c][index] = e[c]; } index++; } #pragma omp parallel for for (int c = 0; c < C; c++) { std::sort(bands[c].begin(), bands[c].end()); } vec<T, C> mad; if (data.size() % 2 == 0) { for (int c = 0; c < C; c++) { mad[c] = T( ((double)bands[c][data.size() / 2] + (double)bands[c][data.size() / 2 - 1]) * 0.5f); } } else { for (int c = 0; c < C; c++) { mad[c] = bands[c][data.size() / 2]; } } return mad; } vec<T, C> madStdDev() const { return vec<T, C>(1.4826 * vec<double, C>(mad())); } vec<T, C> stdDev() const { if (data.size() < 2) { return vec<T, C>(T(0)); } vec<T, C> avg = mean(); vec<double, C> var(0.0); for (vec<T, C>& val : data) { vec<double, C> e = vec<double, C>(val - avg); var += e * e; } var = var / (double)(data.size() - 1); return vec<T, C>(aly::sqrt(var)); } } ; template<class T, int C, ImageType I> std::string Volume<T, C, I>::updateHashCode( size_t MAX_SAMPLES, HashMethod method) { if (MAX_SAMPLES == 0) { hashCode = HashCode(data, method); } else { const size_t seed = 8743128921; std::mt19937 mt(seed); std::uniform_int_distribution<int> wSampler(0, rows - 1); std::uniform_int_distribution<int> hSampler(0, cols - 1); std::uniform_int_distribution<int> dSampler(0, slices - 1); std::vector<vec<T, C>> sample(MAX_SAMPLES); for (int i = 0; i < MAX_SAMPLES; i++) { sample[i] = this->operator()(wSampler(mt), hSampler(mt), dSampler(mt)); } hashCode = HashCode(sample, method); } return hashCode; } template<class T, int C, ImageType I> void Transform(Volume<T, C, I>& im1, Volume<T, C, I>& im2, const std::function<void(vec<T, C>&, vec<T, C>&)>& func) { if (im1.dimensions() != im2.dimensions()) throw std::runtime_error( MakeString() 
<< "Volume dimensions do not match. " << im1.dimensions() << "!=" << im2.dimensions()); size_t sz = im1.size(); #pragma omp parallel for for (int offset = 0; offset < (int)sz; offset++) { func(im1.data[offset], im2.data[offset]); } } template<class T, int C, ImageType I> void Transform(Volume<T, C, I>& im1, const Volume<T, C, I>& im2, const Volume<T, C, I>& im3, const Volume<T, C, I>& im4, const std::function< void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&, const vec<T, C>&)>& func) { if (im1.dimensions() != im2.dimensions()) throw std::runtime_error( MakeString() << "Volume dimensions do not match. " << im1.dimensions() << "!=" << im2.dimensions()); size_t sz = im1.size(); #pragma omp parallel for for (int offset = 0; offset < (int)sz; offset++) { func(im1.data[offset], im2.data[offset], im3.data[offset], im4.data[offset]); } } template<class T, int C, ImageType I> void Transform(Volume<T, C, I>& im1, const std::function<void(vec<T, C>&)>& func) { size_t sz = im1.size(); #pragma omp parallel for for (int offset = 0; offset < (int)sz; offset++) { func(im1.data[offset]); } } template<class T, int C, ImageType I> void Transform(Volume<T, C, I>& im1, const Volume<T, C, I>& im2, const std::function<void(vec<T, C>&, const vec<T, C>&)>& func) { if (im1.dimensions() != im2.dimensions()) throw std::runtime_error( MakeString() << "Volume dimensions do not match. " << im1.dimensions() << "!=" << im2.dimensions()); size_t sz = im1.size(); #pragma omp parallel for for (int offset = 0; offset < (int)sz; offset++) { func(im1.data[offset], im2.data[offset]); } } template<class T, int C, ImageType I> void Transform(Volume<T, C, I>& im1, const Volume<T, C, I>& im2, const Volume<T, C, I>& im3, const std::function<void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&)>& func) { if (im1.dimensions() != im2.dimensions()) throw std::runtime_error( MakeString() << "Volume dimensions do not match. 
" << im1.dimensions() << "!=" << im2.dimensions()); size_t sz = im1.size(); #pragma omp parallel for for (int offset = 0; offset < (int)sz; offset++) { func(im1.data[offset], im2.data[offset], im3.data[offset]); } } template<class T, int C, ImageType I> void Transform(Volume<T, C, I>& im1, Volume<T, C, I>& im2, const std::function< void(int i, int j, int k, vec<T, C>& val1, vec<T, C>& val2)>& func) { if (im1.dimensions() != im2.dimensions()) throw std::runtime_error( MakeString() << "Volume dimensions do not match. " << im1.dimensions() << "!=" << im2.dimensions()); #pragma omp parallel for for (int k = 0; k < im1.slices; k++) { for (int j = 0; j < im1.cols; j++) { for (int i = 0; i < im1.rows; i++) { size_t offset = i + j * im1.rows + k * im1.rows * im1.cols; func(i, j, k, im1.data[offset], im2.data[offset]); } } } } template<class T, int C, ImageType I> void Transform(Volume<T, C, I>& im1, Volume<T, C, I>& im2, const std::function< void(size_t offset, vec<T, C>& val1, vec<T, C>& val2)>& func) { if (im1.dimensions() != im2.dimensions()) throw std::runtime_error( MakeString() << "Volume dimensions do not match. 
" << im1.dimensions() << "!=" << im2.dimensions()); size_t sz = im1.size(); #pragma omp parallel for for (int offset = 0; offset < (int)sz; offset++) { func(offset, im1.data[offset], im2.data[offset]); } } template<class T, class L, class R, int C, ImageType I> std::basic_ostream<L, R> & operator <<( std::basic_ostream<L, R> & ss, const Volume<T, C, I> & A) { ss << "Volume (" << A.getTypeName() << "): " << A.id << " Position: (" << A.x << "," << A.y << ") Dimensions: [" << A.rows << "," << A.cols << "]\n"; return ss; } template<class T, int C, ImageType I> Volume<T, C, I> operator+( const vec<T, C>& scalar, const Volume<T, C, I>& img) { Volume<T, C, I> out(img.rows, img.cols, img.slices, img.position()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = scalar + val2;}; Transform(out, img, f); return out; } template<class T, int C, ImageType I> Volume<T, C, I> operator-( const vec<T, C>& scalar, const Volume<T, C, I>& img) { Volume<T, C, I> out(img.rows, img.cols, img.slices, img.position()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = scalar - val2;}; Transform(out, img, f); return out; } template<class T, int C, ImageType I> Volume<T, C, I> operator*( const vec<T, C>& scalar, const Volume<T, C, I>& img) { Volume<T, C, I> out(img.rows, img.cols, img.slices, img.position()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = scalar*val2;}; Transform(out, img, f); return out; } template<class T, int C, ImageType I> Volume<T, C, I> operator/( const vec<T, C>& scalar, const Volume<T, C, I>& img) { Volume<T, C, I> out(img.rows, img.cols, img.slices, img.position()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = scalar / val2;}; Transform(out, img, f); return out; } template<class T, int C, ImageType I> Volume<T, C, I> operator+( const 
Volume<T, C, I>& img, const vec<T, C>& scalar) { Volume<T, C, I> out(img.rows, img.cols, img.slices, img.position()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = val2 + scalar;}; Transform(out, img, f); return out; } template<class T, int C, ImageType I> Volume<T, C, I> operator-( const Volume<T, C, I>& img, const vec<T, C>& scalar) { Volume<T, C, I> out(img.rows, img.cols, img.slices, img.position()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = val2 - scalar;}; Transform(out, img, f); return out; } template<class T, int C, ImageType I> Volume<T, C, I> operator*( const Volume<T, C, I>& img, const vec<T, C>& scalar) { Volume<T, C, I> out(img.rows, img.cols, img.slices, img.position()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = val2*scalar;}; Transform(out, img, f); return out; } template<class T, int C, ImageType I> Volume<T, C, I> operator/( const Volume<T, C, I>& img, const vec<T, C>& scalar) { Volume<T, C, I> out(img.rows, img.cols, img.slices, img.position()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = val2 / scalar;}; Transform(out, img, f); return out; } template<class T, int C, ImageType I> Volume<T, C, I> operator-( const Volume<T, C, I>& img) { Volume<T, C, I> out(img.rows, img.cols, img.slices, img.position()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = -val2;}; Transform(out, img, f); return out; } template<class T, int C, ImageType I> Volume<T, C, I> operator+=( Volume<T, C, I>& out, const Volume<T, C, I>& img) { std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 += val2;}; Transform(out, img, f); return out; } template<class T, int C, ImageType I> Volume<T, C, I> operator-=( Volume<T, C, I>& 
out, const Volume<T, C, I>& img) { std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 -= val2;}; Transform(out, img, f); return out; } template<class T, int C, ImageType I> Volume<T, C, I> operator*=( Volume<T, C, I>& out, const Volume<T, C, I>& img) { std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 *= val2;}; Transform(out, img, f); return out; } template<class T, int C, ImageType I> Volume<T, C, I> operator/=( Volume<T, C, I>& out, const Volume<T, C, I>& img) { std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 /= val2;}; Transform(out, img, f); return out; } template<class T, int C, ImageType I> Volume<T, C, I> operator+=( Volume<T, C, I>& out, const vec<T, C>& scalar) { std::function<void(vec<T, C>&)> f = [=](vec<T, C>& val1) {val1 += scalar;}; Transform(out, f); return out; } template<class T, int C, ImageType I> Volume<T, C, I> operator-=( Volume<T, C, I>& out, const vec<T, C>& scalar) { std::function<void(vec<T, C>&)> f = [=](vec<T, C>& val1) {val1 -= scalar;}; Transform(out, f); return out; } template<class T, int C, ImageType I> Volume<T, C, I> operator*=( Volume<T, C, I>& out, const vec<T, C>& scalar) { std::function<void(vec<T, C>&)> f = [=](vec<T, C>& val1) {val1 *= scalar;}; Transform(out, f); return out; } template<class T, int C, ImageType I> Volume<T, C, I> operator/=( Volume<T, C, I>& out, const vec<T, C>& scalar) { std::function<void(vec<T, C>&)> f = [=](vec<T, C>& val1) {val1 /= scalar;}; Transform(out, f); return out; } template<class T, int C, ImageType I> Volume<T, C, I> operator+( const Volume<T, C, I>& img1, const Volume<T, C, I>& img2) { Volume<T, C, I> out(img1.rows, img1.cols, img1.slices); std::function<void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2, const vec<T, C>& val3) {val1 = val2 + val3;}; Transform(out, img1, img2, f); 
// NOTE(review): these two tokens close an arithmetic operator whose beginning
// lies above this chunk; they are kept verbatim.
return out;
}

// Element-wise subtraction: out = img1 - img2, evaluated per voxel.
// The output is sized from img1; img2 is presumably expected to have the same
// dimensions -- TODO confirm against Transform().
template<class T, int C, ImageType I> Volume<T, C, I> operator-(
        const Volume<T, C, I>& img1, const Volume<T, C, I>& img2) {
    Volume<T, C, I> out(img1.rows, img1.cols, img1.slices);
    // Per-voxel functor: (dest, lhs, rhs) -> dest = lhs - rhs.
    std::function<void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&)> f =
            [=](vec<T, C>& val1, const vec<T, C>& val2, const vec<T, C>& val3) {val1 = val2 - val3;};
    Transform(out, img1, img2, f);
    return out;
}

// Element-wise multiplication: out = img1 * img2, evaluated per voxel.
// The exact semantics of vec's operator* (component-wise vs. dot) are defined
// elsewhere -- TODO confirm.
template<class T, int C, ImageType I> Volume<T, C, I> operator*(
        const Volume<T, C, I>& img1, const Volume<T, C, I>& img2) {
    Volume<T, C, I> out(img1.rows, img1.cols, img1.slices);
    // Per-voxel functor: (dest, lhs, rhs) -> dest = lhs * rhs.
    std::function<void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&)> f =
            [=](vec<T, C>& val1, const vec<T, C>& val2, const vec<T, C>& val3) {val1 = val2*val3;};
    Transform(out, img1, img2, f);
    return out;
}

// Element-wise division: out = img1 / img2, evaluated per voxel.
// No zero-divisor check is performed here.
template<class T, int C, ImageType I> Volume<T, C, I> operator/(
        const Volume<T, C, I>& img1, const Volume<T, C, I>& img2) {
    Volume<T, C, I> out(img1.rows, img1.cols, img1.slices);
    // Per-voxel functor: (dest, lhs, rhs) -> dest = lhs / rhs.
    std::function<void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&)> f =
            [=](vec<T, C>& val1, const vec<T, C>& val2, const vec<T, C>& val3) {val1 = val2 / val3;};
    Transform(out, img1, img2, f);
    return out;
}

// Write a volume to disk as a raw binary dump (<name>.raw) plus a MIPAV-style
// XML header (<name>.xml) describing extents, data type, and metadata.
// The sample order in the .raw file is channel-major: for each channel, the
// rows vary fastest, then columns, then slices.
// Throws std::runtime_error if either output file cannot be opened.
template<class T, int C, ImageType I> void WriteImageToRawFile(
        const std::string& file, const Volume<T, C, I>& img) {
    std::ostringstream vstr;
    // Replace whatever extension `file` has with ".raw".
    std::string fileName = GetFileWithoutExtension(file);
    vstr << fileName << ".raw";
    FILE* f = fopen(vstr.str().c_str(), "wb");
    if (f == NULL) {
        throw std::runtime_error(
                MakeString() << "Could not open " << vstr.str().c_str()
                        << " for writing.");
    }
    // Dump the raw samples: channel-major, then slice, column, row.
    for (int c = 0; c < img.channels; c++) {
        for (int k = 0; k < img.slices; k++) {
            for (int j = 0; j < img.cols; j++) {
                for (int i = 0; i < img.rows; i++) {
                    T val = img(i, j, k)[c];
                    fwrite(&val, sizeof(T), 1, f);
                }
            }
        }
    }
    fclose(f);
    // Map the runtime image type to the MIPAV data-type name used in the
    // XML header.
    std::string typeName = "";
    switch (img.type) {
    case ImageType::BYTE:
        typeName = "Byte";
        break;
    case ImageType::UBYTE:
        typeName = "Unsigned Byte";
        break;
    case ImageType::SHORT:
        typeName = "Short";
        break;
    case ImageType::USHORT:
        typeName = "Unsigned Short";
        break;
    case ImageType::INT:
        typeName = "Integer";
        break;
    case ImageType::UINT:
        typeName = "Unsigned Integer";
        break;
    case ImageType::FLOAT:
        typeName = "Float";
        break;
    case ImageType::DOUBLE:
        typeName = "Double";
        break;
    }
    //std::cout << vstr.str() << std::endl;
    // Build the MIPAV XML header. Multi-channel volumes are declared as
    // 4-dimensional (channels become a fourth extent).
    std::stringstream sstr;
    sstr << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n";
    sstr << "<!-- MIPAV header file -->\n";
    if (img.channels > 1) {
        sstr << "<image xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" nDimensions=\"4\">\n";
    } else {
        sstr << "<image xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" nDimensions=\"3\">\n";
    }
    sstr << " <Dataset-attributes>\n";
    sstr << " <Image-offset>0</Image-offset>\n";
    sstr << " <Data-type>" << typeName << "</Data-type>\n";
    sstr << " <Endianess>Little</Endianess>\n";
    sstr << " <Extents>" << img.rows << "</Extents>\n";
    sstr << " <Extents>" << img.cols << "</Extents>\n";
    sstr << " <Extents>" << img.slices << "</Extents>\n";
    if (img.channels > 1) {
        sstr << " <Extents>" << img.channels << "</Extents>\n";
    }
    // Fixed defaults: unit isotropic resolution, unknown orientation/origin.
    sstr << " <Resolutions>\n";
    sstr << " <Resolution>1.0</Resolution>\n";
    sstr << " <Resolution>1.0</Resolution>\n";
    sstr << " <Resolution>1.0</Resolution>\n";
    sstr << " </Resolutions>\n";
    sstr << " <Slice-spacing>1.0</Slice-spacing>\n";
    sstr << " <Slice-thickness>0.0</Slice-thickness>\n";
    sstr << " <Units>Millimeters</Units>\n";
    sstr << " <Units>Millimeters</Units>\n";
    sstr << " <Units>Millimeters</Units>\n";
    sstr << " <Compression>none</Compression>\n";
    sstr << " <Orientation>Unknown</Orientation>\n";
    sstr << " <Subject-axis-orientation>Unknown</Subject-axis-orientation>\n";
    sstr << " <Subject-axis-orientation>Unknown</Subject-axis-orientation>\n";
    sstr << " <Subject-axis-orientation>Unknown</Subject-axis-orientation>\n";
    sstr << " <Origin>0.0</Origin>\n";
    sstr << " <Origin>0.0</Origin>\n";
    sstr << " <Origin>0.0</Origin>\n";
    sstr << " <Modality>Unknown Modality</Modality>\n";
    sstr << " </Dataset-attributes>\n";
    sstr << "</image>\n";
    // Write the XML header next to the raw file.
    std::ofstream myfile;
    std::stringstream xmlFile;
    xmlFile << fileName << ".xml";
    myfile.open(xmlFile.str().c_str(), std::ios_base::out);
    if (!myfile.is_open()) {
        throw std::runtime_error(
                MakeString() << "Could not open " << xmlFile.str()
                        << " for writing.");
    }
    myfile << sstr.str();
    myfile.close();
}

// Convenience aliases: semantic names (RGBA/RGB/A = 4/3/1 channels) ...
typedef Volume<uint8_t, 4, ImageType::UBYTE> VolumeRGBA;
typedef Volume<int, 4, ImageType::INT> VolumeRGBAi;
typedef Volume<float, 4, ImageType::FLOAT> VolumeRGBAf;
typedef Volume<uint8_t, 3, ImageType::UBYTE> VolumeRGB;
typedef Volume<int, 3, ImageType::INT> VolumeRGBi;
typedef Volume<float, 3, ImageType::FLOAT> VolumeRGBf;
typedef Volume<uint8_t, 1, ImageType::UBYTE> VolumeA;
typedef Volume<int, 1, ImageType::INT> VolumeAi;
typedef Volume<float, 1, ImageType::FLOAT> VolumeAf;
// ... and numeric names (Volume<N><suffix> = N channels of the suffixed
// scalar type: b/ub/us/s/i/ui/f).
typedef Volume<int8_t, 4, ImageType::BYTE> Volume4b;
typedef Volume<uint8_t, 4, ImageType::UBYTE> Volume4ub;
typedef Volume<uint16_t, 4, ImageType::USHORT> Volume4us;
typedef Volume<int16_t, 4, ImageType::SHORT> Volume4s;
typedef Volume<int, 4, ImageType::INT> Volume4i;
typedef Volume<uint32_t, 4, ImageType::UINT> Volume4ui;
typedef Volume<float, 4, ImageType::FLOAT> Volume4f;
typedef Volume<int8_t, 3, ImageType::BYTE> Volume3b;
typedef Volume<uint8_t, 3, ImageType::UBYTE> Volume3ub;
typedef Volume<uint16_t, 3, ImageType::USHORT> Volume3us;
typedef Volume<int16_t, 3, ImageType::SHORT> Volume3s;
typedef Volume<int, 3, ImageType::INT> Volume3i;
typedef Volume<uint32_t, 3, ImageType::UINT> Volume3ui;
typedef Volume<float, 3, ImageType::FLOAT> Volume3f;
typedef Volume<int8_t, 2, ImageType::BYTE> Volume2b;
typedef Volume<uint8_t, 2, ImageType::UBYTE> Volume2ub;
typedef Volume<uint16_t, 2, ImageType::USHORT> Volume2us;
typedef Volume<int16_t, 2, ImageType::SHORT> Volume2s;
typedef Volume<int, 2, ImageType::INT> Volume2i;
typedef Volume<uint32_t, 2, ImageType::UINT> Volume2ui;
typedef Volume<float, 2, ImageType::FLOAT> Volume2f;
typedef Volume<int8_t, 1, ImageType::BYTE> Volume1b;
typedef Volume<uint8_t, 1, ImageType::UBYTE> Volume1ub;
typedef Volume<uint16_t, 1, ImageType::USHORT> Volume1us;
typedef Volume<int16_t, 1, ImageType::SHORT> Volume1s;
typedef Volume<int, 1, ImageType::INT> Volume1i;
typedef Volume<uint32_t, 1, ImageType::UINT> Volume1ui;
typedef Volume<float, 1, ImageType::FLOAT> Volume1f;
} ;  // closes the enclosing namespace (opened above this chunk)
#endif  // closes the include guard (opened above this chunk)
// ===== math_array.h =====
// ----------------------------------------------------------------------------- // // Copyright (C) 2021 CERN & Newcastle University for the benefit of the // BioDynaMo collaboration. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // See the LICENSE file distributed with this work for details. // See the NOTICE file distributed with this work for additional information // regarding copyright ownership. // // ----------------------------------------------------------------------------- #ifndef CORE_CONTAINER_MATH_ARRAY_H_ #define CORE_CONTAINER_MATH_ARRAY_H_ #include <algorithm> #include <cassert> #include <cmath> #include <numeric> #include <ostream> #include <stdexcept> #include <utility> #include "core/util/root.h" namespace bdm { /// Array with a fixed number of elements. It implements the same behaviour /// of the standard `std::array<T, N>` container. However, it provides also /// several custom mathematical operations (e.g. Sum(), Norm() etc.). template <class T, std::size_t N> class MathArray { // NOLINT public: /// Default constructor MathArray() { #pragma omp simd for (size_t i = 0; i < N; i++) { data_[i] = T(); } } /// Constructor which accepts an std::initiliazer_list to set /// the array's content. /// \param l an initializer list constexpr MathArray(std::initializer_list<T> l) { assert(l.size() <= N); auto it = l.begin(); for (uint64_t i = 0; i < N; i++) { data_[i] = *(it++); } for (uint64_t i = l.size(); i < N; i++) { data_[i] = T(); } } /// Return a pointer to the underlying data. /// \return cont T pointer to the first entry of the array. inline const T* data() const { return &data_[0]; } // NOLINT /// Return the size of the array. /// \return integer denoting the array's size. inline const size_t size() const { return N; } // NOLINT /// Check if the array is empty. /// \return true if size() == 0, false otherwise. 
inline const bool empty() const { return N == 0; } // NOLINT /// Overloaded array subscript operator. It does not perform /// any boundary checks. /// \param idx element's index. /// \return the requested element. T& operator[](size_t idx) { return data_[idx]; } /// Const overloaded array subscript operator. /// \param idx element's index. /// \return the requested element. const T& operator[](size_t idx) const { return data_[idx]; } /// Returns the element at the given position. It will throw /// an std::out_of_range exception if the given index is out /// of the array's boundaries. /// \param idx the index of the element. /// \return the requested element. T& at(size_t idx) noexcept(false) { // NOLINT if (idx > size() || idx < 0) { throw std::out_of_range("The index is out of range"); } return data_[idx]; } const T* begin() const { return &(data_[0]); } // NOLINT const T* end() const { return &(data_[N]); } // NOLINT T* begin() { return &(data_[0]); } // NOLINT T* end() { return &(data_[N]); } // NOLINT /// Returns the element at the beginning of the array. /// \return first element. T& front() { return *(this->begin()); } // NOLINT /// Return the element at the end of the array. /// \return last element. T& back() { // NOLINT auto tmp = this->end(); tmp--; return *tmp; } /// Assignment operator. /// \param other the other MathArray instance. /// \return the current MathArray. MathArray& operator=(const MathArray& other) { if (this != &other) { assert(other.size() == N); std::copy(other.data_, other.data_ + other.size(), data_); } return *this; } /// Equality operator. /// \param other a MathArray instance. /// \return true if they have the same content, false otherwise. 
bool operator==(const MathArray& other) const { if (other.size() != N) { return false; } for (size_t i = 0; i < N; i++) { if (other[i] != data_[i]) { return false; } } return true; } MathArray& operator++() { #pragma omp simd for (size_t i = 0; i < N; i++) { ++data_[i]; } return *this; } MathArray operator++(int) { MathArray tmp(*this); operator++(); return tmp; } MathArray& operator--() { #pragma omp simd for (size_t i = 0; i < N; i++) { --data_[i]; } return *this; } MathArray operator--(int) { MathArray tmp(*this); operator--(); return tmp; } MathArray& operator+=(const MathArray& rhs) { assert(N == rhs.size()); #pragma omp simd for (size_t i = 0; i < N; i++) { data_[i] += rhs[i]; } return *this; } MathArray operator+(const MathArray& rhs) { assert(size() == rhs.size()); MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; i++) { tmp[i] = data_[i] + rhs[i]; } return tmp; } const MathArray operator+(const MathArray& rhs) const { assert(size() == rhs.size()); MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; i++) { tmp[i] = data_[i] + rhs[i]; } return tmp; } MathArray& operator+=(const T& rhs) { #pragma omp simd for (size_t i = 0; i < N; i++) { data_[i] += rhs; } return *this; } MathArray operator+(const T& rhs) { MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; i++) { tmp[i] = data_[i] + rhs; } return tmp; } MathArray& operator-=(const MathArray& rhs) { assert(size() == rhs.size()); #pragma omp simd for (size_t i = 0; i < N; i++) { data_[i] -= rhs[i]; } return *this; } MathArray operator-(const MathArray& rhs) { assert(size() == rhs.size()); MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; i++) { tmp[i] = data_[i] - rhs[i]; } return tmp; } const MathArray operator-(const MathArray& rhs) const { assert(size() == rhs.size()); MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; i++) { tmp[i] = data_[i] - rhs[i]; } return tmp; } MathArray& operator-=(const T& rhs) { #pragma omp simd for (size_t i = 0; i < N; i++) { data_[i] -= rhs; 
} return *this; } MathArray operator-(const T& rhs) { MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; i++) { tmp[i] = data_[i] - rhs; } return tmp; } T& operator*=(const MathArray& rhs) = delete; T operator*(const MathArray& rhs) { assert(size() == rhs.size()); T result = 0; #pragma omp simd for (size_t i = 0; i < N; i++) { result += data_[i] * rhs[i]; } return result; } const T operator*(const MathArray& rhs) const { assert(size() == rhs.size()); T result = 0; #pragma omp simd for (size_t i = 0; i < N; i++) { result += data_[i] * rhs[i]; } return result; } MathArray& operator*=(const T& k) { #pragma omp simd for (size_t i = 0; i < N; i++) { data_[i] *= k; } return *this; } MathArray operator*(const T& k) { MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; i++) { tmp[i] = data_[i] * k; } return tmp; } const MathArray operator*(const T& k) const { MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; i++) { tmp[i] = data_[i] * k; } return tmp; } MathArray& operator/=(const T& k) { #pragma omp simd for (size_t i = 0; i < N; i++) { data_[i] /= k; } return *this; } MathArray operator/(const T& k) { MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; i++) { tmp[i] = data_[i] / k; } return tmp; } /// Fill the MathArray with a constant value. /// \param k the constant value /// \return the array MathArray& fill(const T& k) { // NOLINT std::fill(std::begin(data_), std::end(data_), k); return *this; } /// Return the sum of all the array's elements. /// \return sum of the array's content. T Sum() const { return std::accumulate(begin(), end(), 0); } /// Compute the norm of the array's content. /// \return array's norm. T Norm() const { T result = 0; #pragma omp simd for (size_t i = 0; i < N; i++) { result += data_[i] * data_[i]; } result = std::sqrt(result); return result == 0 ? 1.0 : result; } /// Normalize the array. It will be done in-place. /// \return the normalized array. 
MathArray& Normalize() { T norm = Norm(); #pragma omp simd for (size_t i = 0; i < N; i++) { data_[i] /= norm; } return *this; } /// Compute the entry wise product given another array /// of the same size. /// \param rhs the other array /// \return a new array with the result MathArray EntryWiseProduct(const MathArray& rhs) { assert(rhs.size() == N); MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; ++i) { tmp[i] = data_[i] * rhs[i]; } return tmp; } private: T data_[N]; BDM_CLASS_DEF_NV(MathArray, 1); // NOLINT }; template <class T, std::size_t N> std::ostream& operator<<(std::ostream& o, const MathArray<T, N>& arr) { for (size_t i = 0; i < N; i++) { o << arr[i]; if (i != N - 1) { o << ", "; } } return o; } /// Alias for a size 3 MathArray using Double3 = MathArray<double, 3>; /// Alias for a size 4 MathArray using Double4 = MathArray<double, 4>; } // namespace bdm #endif // CORE_CONTAINER_MATH_ARRAY_H_