source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_assign_zombie4.c
//------------------------------------------------------------------------------
// GB_assign_zombie4: delete entries in C(i,:) for C_replace_phase
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// For GrB_Row_assign or GrB_Col_assign, C(i,J)<M,repl>=..., if C_replace is
// true, and mask M is present, then any entry C(i,j) outside the list J must
// be deleted, if M(0,j)=0.

// GB_assign_zombie3 and GB_assign_zombie4 are transposes of each other.

#include "GB_assign.h"

// Deleted entries are not removed immediately: they are marked as "zombies"
// by storing their row index flipped (GB_FLIP), and Z->nzombies is updated
// so a later pruning phase can reclaim the space.
void GB_assign_zombie4
(
    GrB_Matrix Z,               // the matrix C, or a copy
    const GrB_Matrix M,         // mask; M(0,j) decides whether C(i,j) survives
    const bool Mask_comp,       // if true, use the complement of the mask
    const bool Mask_struct,     // if true, use only the structure of M
    const int64_t i,            // index of entries to delete
    const GrB_Index *J,         // columns kept by the assignment
    const int64_t nJ,           // length of the list J
    const int Jkind,            // kind of J (explicit list, range, stride, ...)
    const int64_t Jcolon [3],   // begin:inc:end when J is a colon expression
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // get Z
    //--------------------------------------------------------------------------

    const int64_t *GB_RESTRICT Zh = Z->h ;
    const int64_t *GB_RESTRICT Zp = Z->p ;
    const int64_t Znvec = Z->nvec ;
    int64_t *GB_RESTRICT Zi = Z->i ;
    int64_t nzombies = Z->nzombies ;
    // zorig: zombie count before this phase; the binary search below must
    // still recognize pre-existing zombies (entries with flipped indices)
    const int64_t zorig = nzombies ;

    //--------------------------------------------------------------------------
    // get M
    //--------------------------------------------------------------------------

    const int64_t *GB_RESTRICT Mh = M->h ;
    const int64_t *GB_RESTRICT Mp = M->p ;
    // Mx is NULL for a structural mask: an entry present in M counts as true
    const GB_void *GB_RESTRICT Mx = (GB_void *) (Mask_struct ? NULL : (M->x)) ;
    const size_t msize = M->type->size ;
    const int64_t Mnvec = M->nvec ;
    const bool M_is_hyper = M->is_hyper ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (Znvec, chunk, nthreads_max) ;
    // oversubscribe (64 tasks per thread) so the dynamic schedule can balance
    // vectors of very different lengths
    int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;

    //--------------------------------------------------------------------------
    // delete entries in Z(i,:)
    //--------------------------------------------------------------------------

    // The entry Z(i,j) is deleted if j is not in the list J, and if M(0,j)=0
    // (if the mask is not complemented) or M(0,j)=1 (if the mask is
    // complemented).

    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {
        // each task owns a contiguous slice of Z's vectors
        int64_t kfirst, klast ;
        GB_PARTITION (kfirst, klast, Znvec, taskid, ntasks) ;
        for (int64_t k = kfirst ; k < klast ; k++)
        {

            //------------------------------------------------------------------
            // get Z(:,j) and determine if j is outside the list J
            //------------------------------------------------------------------

            // j is implicit (== k) for a standard matrix, explicit when
            // Z is hypersparse
            int64_t j = (Zh == NULL) ? k : Zh [k] ;
            bool j_outside = !GB_ij_is_in_list (J, nJ, j, Jkind, Jcolon) ;
            if (j_outside)
            {

                //--------------------------------------------------------------
                // j is not in the list J; find Z(i,j)
                //--------------------------------------------------------------

                int64_t pZ = Zp [k] ;
                int64_t pZ_end = Zp [k+1] ;
                int64_t pright = pZ_end - 1 ;
                bool found, is_zombie ;
                GB_BINARY_SEARCH_ZOMBIE (i, Zi, pZ, pright, found, zorig,
                    is_zombie) ;

                //--------------------------------------------------------------
                // delete Z(i,j) if found, not a zombie, and M(0,j) allows it
                //--------------------------------------------------------------

                if (found && !is_zombie)
                {

                    //----------------------------------------------------------
                    // Z(i,j) is a live entry not in the Z(I,J) submatrix
                    //----------------------------------------------------------

                    // Check the mask M to see if it should be deleted.
                    int64_t pM, pM_end ;
                    int64_t pleft = 0 ;
                    int64_t pright = Mnvec - 1 ;
                    // locate M(:,j); [pM, pM_end) is its range in Mx
                    GB_lookup (M_is_hyper, Mh, Mp, &pleft, pright, j,
                        &pM, &pM_end) ;
                    bool mij = false ;
                    if (pM < pM_end)
                    {
                        // found it
                        mij = GB_mcast (Mx, pM, msize) ;
                    }
                    if (Mask_comp)
                    {
                        // negate the mask if Mask_comp is true
                        mij = !mij ;
                    }
                    if (!mij)
                    {
                        // delete Z(i,j) by marking it as a zombie
                        nzombies++ ;
                        Zi [pZ] = GB_FLIP (i) ;
                    }
                }
            }
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    Z->nzombies = nzombies ;
}
thread-limit-3.c
#include <stdlib.h>
#include <omp.h>

/* Regression test: a `thread_limit (1)` clause on a teams construct must be
   reported by omp_get_thread_limit() inside the region.  The `if (0)` clause
   disables device offload for the target region, so the construct runs on
   the host.  The program aborts on failure and exits 0 on success. */
int main ()
{
  #pragma omp target if (0)
  #pragma omp teams thread_limit (1)
  {
    if (omp_get_thread_limit () != 1)
      abort ();
  }
  return 0;
}
matrix_csr.h
#ifndef XGBOOST_UTILS_MATRIX_CSR_H_
#define XGBOOST_UTILS_MATRIX_CSR_H_
/*!
 * \file matrix_csr.h
 * \brief this file defines some easy to use STL based class for in memory
 *        sparse CSR matrix
 * \author Tianqi Chen
 */
#include <vector>
#include <utility>
#include <algorithm>
#include "./io.h"
#include "./utils.h"
#include "./omp.h"
namespace xgboost {
namespace utils {
/*!
 * \brief a class used to help construct CSR format matrix,
 *        can be used to convert row major CSR to column major CSR
 * \tparam IndexType type of index used to store the index position, usually unsigned or size_t
 * \tparam UseAcList whether enabling the usage of aclist, this option must be enabled manually
 * \tparam SizeType type used for the offsets stored in the row pointer
 *
 * Intended call sequence: InitBudget -> AddBudget* -> InitStorage ->
 * PushElem* -> (Cleanup, only when aclist is used).
 */
template<typename IndexType, bool UseAcList = false, typename SizeType = size_t>
struct SparseCSRMBuilder {
 private:
  /*! \brief dummy variable used in the indicator matrix construction */
  std::vector<size_t> dummy_aclist;
  /*! \brief pointer to each of the row */
  std::vector<SizeType> &rptr;
  /*! \brief index of nonzero entries in each row */
  std::vector<IndexType> &findex;
  /*! \brief a list of active rows, used when many rows are empty */
  std::vector<size_t> &aclist;

 public:
  // two-argument form: only valid when the aclist option is compiled out
  SparseCSRMBuilder(std::vector<SizeType> &p_rptr,
                    std::vector<IndexType> &p_findex)
      :rptr(p_rptr), findex(p_findex), aclist(dummy_aclist) {
    Assert(!UseAcList, "enabling bug");
  }
  /*! \brief use with caution! rptr must be cleaned before use */
  SparseCSRMBuilder(std::vector<SizeType> &p_rptr,
                    std::vector<IndexType> &p_findex,
                    std::vector<size_t> &p_aclist)
      :rptr(p_rptr), findex(p_findex), aclist(p_aclist) {
    Assert(UseAcList, "must manually enable the option use aclist");
  }

 public:
  /*!
   * \brief step 1: initialize the number of rows in the data, not necessary exact
   * \param nrows number of rows in the matrix, can be smaller than expected
   */
  inline void InitBudget(size_t nrows = 0) {
    if (!UseAcList) {
      rptr.clear();
      rptr.resize(nrows + 1, 0);
    } else {
      // aclist mode reuses rptr across builds; only previously active rows
      // are reset, which is what makes sparse reuse cheap
      Assert(nrows + 1 == rptr.size(), "rptr must be initialized already");
      this->Cleanup();
    }
  }
  /*!
   * \brief step 2: add budget to each rows, this function is called when aclist is used
   * \param row_id the id of the row
   * \param nelem number of element budget add to this row
   */
  inline void AddBudget(size_t row_id, SizeType nelem = 1) {
    if (rptr.size() < row_id + 2) {
      rptr.resize(row_id + 2, 0);
    }
    if (UseAcList) {
      // first budget for this row: record it as active
      if (rptr[row_id + 1] == 0) aclist.push_back(row_id);
    }
    rptr[row_id + 1] += nelem;
  }
  /*! \brief step 3: initialize the necessary storage */
  inline void InitStorage(void) {
    // initialize rptr to be beginning of each segment
    // (exclusive prefix sum over the per-row budgets)
    size_t start = 0;
    if (!UseAcList) {
      for (size_t i = 1; i < rptr.size(); i++) {
        size_t rlen = rptr[i];
        rptr[i] = start;
        start += rlen;
      }
    } else {
      // case with active list
      std::sort(aclist.begin(), aclist.end());
      for (size_t i = 0; i < aclist.size(); i++) {
        size_t ridx = aclist[i];
        size_t rlen = rptr[ridx + 1];
        rptr[ridx + 1] = start;
        // set previous rptr to right position if previous feature is not active
        if (i == 0 || ridx != aclist[i - 1] + 1) rptr[ridx] = start;
        start += rlen;
      }
    }
    findex.resize(start);
  }
  /*!
   * \brief step 4:
   * used in indicator matrix construction, add new
   * element to each row, the number of calls shall be exactly same as add_budget
   */
  inline void PushElem(size_t row_id, IndexType col_id) {
    // rptr[row_id + 1] doubles as the write cursor for the row; once all
    // pushes are done it has advanced to the row's end offset
    SizeType &rp = rptr[row_id + 1];
    findex[rp++] = col_id;
  }
  /*!
   * \brief step 5: only needed when aclist is used
   * clean up the rptr for next usage
   */
  inline void Cleanup(void) {
    Assert(UseAcList, "this function can only be called use AcList");
    for (size_t i = 0; i < aclist.size(); i++) {
      const size_t ridx = aclist[i];
      rptr[ridx] = 0;
      rptr[ridx + 1] = 0;
    }
    aclist.clear();
  }
};

/*!
 * \brief a class used to help construct CSR format matrix file
 * \tparam IndexType type of index used to store the index position
 * \tparam SizeType type of size used in row pointer
 */
template<typename IndexType, typename SizeType = size_t>
struct SparseCSRFileBuilder {
 public:
  explicit SparseCSRFileBuilder(utils::ISeekStream *fo, size_t buffer_size)
      : fo(fo), buffer_size(buffer_size) {
  }
  /*!
   * \brief step 1: initialize the number of rows in the data, not necessary exact
   * \param nrows number of rows in the matrix, can be smaller than expected
   */
  inline void InitBudget(size_t nrows = 0) {
    rptr.clear();
    rptr.resize(nrows + 1, 0);
  }
  /*!
   * \brief step 2: add budget to each rows
   * \param row_id the id of the row
   * \param nelem number of element budget add to this row
   */
  inline void AddBudget(size_t row_id, SizeType nelem = 1) {
    if (rptr.size() < row_id + 2) {
      rptr.resize(row_id + 2, 0);
    }
    rptr[row_id + 1] += nelem;
  }
  /*! \brief step 3: initialize the necessary storage */
  inline void InitStorage(void) {
    // inclusive prefix sum: rptr[i] becomes the end offset of row i-1
    SizeType nelem = 0;
    for (size_t i = 1; i < rptr.size(); i++) {
      nelem += rptr[i];
      rptr[i] = nelem;
    }
    // file layout: [begin_meta][data region][rptr]; begin_data is where the
    // column-index data starts, right after the begin_meta field itself
    begin_data = static_cast<SizeType>(fo->Tell()) + sizeof(SizeType);
    SizeType begin_meta = begin_data + nelem * sizeof(IndexType);
    fo->Write(&begin_meta, sizeof(begin_meta));
    fo->Seek(begin_meta);
    fo->Write(rptr);
    // setup buffer space
    buffer_rptr.resize(rptr.size());
    buffer_temp.reserve(buffer_size);
    buffer_data.resize(buffer_size);
    saved_offset = rptr;
    saved_offset.resize(rptr.size() - 1);
    this->ClearBuffer();
  }
  /*! \brief step 4: push element into buffer */
  inline void PushElem(SizeType row_id, IndexType col_id) {
    // flush to file whenever the in-memory buffer is full
    if (buffer_temp.size() == buffer_size) {
      this->WriteBuffer();
      this->ClearBuffer();
    }
    buffer_rptr[row_id + 1] += 1;
    buffer_temp.push_back(std::make_pair(row_id, col_id));
  }
  /*! \brief finalize the construction */
  inline void Finalize(void) {
    this->WriteBuffer();
    // every row must have received exactly its budgeted number of elements
    for (size_t i = 0; i < saved_offset.size(); ++i) {
      utils::Assert(saved_offset[i] == rptr[i+1], "some block not write out");
    }
  }
  /*! \brief content must be in wb+ */
  template<typename Comparator>
  inline void SortRows(Comparator comp, size_t step) {
    // read back `step` rows at a time, sort each row in parallel, write back
    for (size_t i = 0; i < rptr.size() - 1; i += step) {
      bst_omp_uint begin = static_cast<bst_omp_uint>(i);
      bst_omp_uint end = static_cast<bst_omp_uint>(std::min(rptr.size() - 1, i + step));
      if (rptr[end] != rptr[begin]) {
        fo->Seek(begin_data + rptr[begin] * sizeof(IndexType));
        buffer_data.resize(rptr[end] - rptr[begin]);
        fo->Read(BeginPtr(buffer_data), (rptr[end] - rptr[begin]) * sizeof(IndexType));
        // do parallel sorting
        #pragma omp parallel for schedule(static)
        for (bst_omp_uint j = begin; j < end; ++j) {
          std::sort(&buffer_data[0] + rptr[j] - rptr[begin],
                    &buffer_data[0] + rptr[j+1] - rptr[begin], comp);
        }
        fo->Seek(begin_data + rptr[begin] * sizeof(IndexType));
        fo->Write(BeginPtr(buffer_data), (rptr[end] - rptr[begin]) * sizeof(IndexType));
      }
    }
    // NOTE(review): looks like leftover debug output — consider removing
    printf("CSV::begin_dat=%lu\n", begin_data);
  }

 protected:
  // flush buffered (row, col) pairs: bucket them by row in buffer_data,
  // then append each row's run at its saved file offset
  inline void WriteBuffer(void) {
    SizeType start = 0;
    for (size_t i = 1; i < buffer_rptr.size(); ++i) {
      size_t rlen = buffer_rptr[i];
      buffer_rptr[i] = start;
      start += rlen;
    }
    for (size_t i = 0; i < buffer_temp.size(); ++i) {
      SizeType &rp = buffer_rptr[buffer_temp[i].first + 1];
      buffer_data[rp++] = buffer_temp[i].second;
    }
    // write out
    for (size_t i = 0; i < buffer_rptr.size() - 1; ++i) {
      size_t nelem = buffer_rptr[i+1] - buffer_rptr[i];
      if (nelem != 0) {
        utils::Assert(saved_offset[i] + nelem <= rptr[i+1], "data exceed bound");
        fo->Seek(saved_offset[i] * sizeof(IndexType) + begin_data);
        fo->Write(&buffer_data[0] + buffer_rptr[i], nelem * sizeof(IndexType));
        saved_offset[i] += nelem;
      }
    }
  }
  inline void ClearBuffer(void) {
    buffer_temp.clear();
    std::fill(buffer_rptr.begin(), buffer_rptr.end(), 0);
  }

 private:
  /*! \brief output file pointer of the data */
  utils::ISeekStream *fo;
  /*! \brief pointer to each of the row */
  std::vector<SizeType> rptr;
  /*! \brief next free file slot (in elements) for each row */
  std::vector<SizeType> saved_offset;
  /*! \brief beginning position of data */
  size_t begin_data;
  // ----- the following are buffer space
  /*! \brief maximum size of content buffer */
  size_t buffer_size;
  /*! \brief store the data content */
  std::vector< std::pair<SizeType, IndexType> > buffer_temp;
  /*! \brief per-row cursors/counters within the buffer */
  std::vector<SizeType> buffer_rptr;
  /*! \brief bucketed column indices, ready to be written */
  std::vector<IndexType> buffer_data;
};
}  // namespace utils
}  // namespace xgboost
#endif  // XGBOOST_UTILS_MATRIX_CSR_H_
DRB104-nowait-barrier-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
/* This example is based on one code snippet extracted from a paper:
   Ma etc. Symbolic Analysis of Concurrency Errors in OpenMP Programs, ICPP 2013
   Explicit barrier to counteract nowait
*/
#include <stdio.h>
#include <assert.h>
#include <omp.h>

/* Two back-to-back parallel-for loops: the first fills a[i] = i, the second
   rewrites a[i] = b + a[i] * 5.  Each `#pragma omp parallel for` region ends
   with an implicit barrier, so the second loop cannot start before the first
   finishes and there is no data race.  With b = 5, a[9] = 5 + 9 * 5 = 50,
   so error = a[9] + 1 = 51 always holds. */
int main()
{
  int i;
  int error;
  int len = 1000;
  int a[len];
  int b = 5;
#pragma omp parallel for private (i)
  for (i = 0; i <= len - 1; i += 1) {
    a[i] = i;
  }
  {
#pragma omp parallel for private (i) firstprivate (len,b)
    for (i = 0; i <= len - 1; i += 1) {
      a[i] = b + a[i] * 5;
    }
  }
  error = a[9] + 1;
  /* expansion of assert(error == 51), apparently produced by a
     source-to-source tool; calls __assert_fail directly on failure */
  (((void )(sizeof(((error == 51?1 : 0))))) , ((
  {
    if (error == 51) ;
     else
      __assert_fail("error == 51","DRB104-nowait-barrier-orig-no.c",70,__PRETTY_FUNCTION__);
  })));
  printf("error = %d\n",error);
  return 0;
}
Pooling.h
#ifndef POOLING
#define POOLING
/*
 * Pooling.h:
 * pool operation, max, min, average and sum pooling
 *
 * Created on: Apr 22, 2017
 * Author: mszhang
 */
#include "MyLib.h"
#include "Node.h"
#include "Graph.h"

// Abstract base for pooling graph nodes: collects the input nodes, validates
// their dimension against this node's `dim`, and registers the node with the
// computation graph.  Subclasses implement compute()/backward().
class PoolNode : public Node {
  public:
    vector<PNode> ins;   // input nodes pooled by this node

  public:
    PoolNode() : Node() {
        ins.clear();
    }

    ~PoolNode() {
        ins.clear();
    }

    inline void clearValue() {
        Node::clearValue();
        ins.clear();
    }

    // no parameters to set for pooling; kept for interface uniformity
    inline void setParam(int maxsize) {
    }

    inline void init(int ndim, dtype dropout) {
        Node::init(ndim, dropout);
    }

  public:
    // Wire this node into the graph: every input must have first-dimension
    // size equal to `dim`, otherwise the node resets itself and bails out.
    void forward(Graph *cg, const vector<PNode>& x) {
        if (x.size() == 0) {
            std::cout << "empty inputs for max|min|sum|avg pooling" << std::endl;
            return;
        }
        int nSize = x.size();
        ins.clear();
        for (int i = 0; i < nSize; i++) {
            int val_dim0 = x[i]->val.shape()[0];
            if (val_dim0 != dim) {
                std::cout << "input matrixes are not matched" << std::endl;
                clearValue();
                return;
            }
            ins.push_back(x[i]);
        }

        degree = 0;
        for (int i = 0; i < nSize; i++) {
            ins[i]->addParent(this);
        }
        cg->addNode(this);
    }

  public:
    inline PExecute generate(bool bTrain);

    // better to rewrite for deep understanding
    inline bool typeEqual(PNode other) {
        return Node::typeEqual(other);
    }

  public:
    virtual void compute() = 0;
    virtual void backward() = 0;
};

// Element-wise max over the input vectors; `index` records the argmax
// positions so the gradient can be routed back in backward().
class MaxPoolNode : public PoolNode {
  public:
    IndexPtr index;

    MaxPoolNode() : PoolNode() {
        node_type = "max-pooling";
    }

    void init(int ndim, dtype dropout) {
        Node::init(ndim, dropout);
        DEV->init_index_ptr(index, ndim);
    }

    ~MaxPoolNode(){
    }

  public:
    //Be careful that the row is the dim of input vector, and the col is the number of input vectors
    //Another point is that we change the input vectors directly.
    inline void compute() {
        int nSize = ins.size();
        //LDG::Tensor in_x;
        //DEV->init(in_x, Shape({dim, nSize}));
        vector<LDG::PTensor> vec_ins;
        for (int i = 0; i < nSize; ++i) {
            vec_ins.push_back(&ins[i]->val);
        }
        //DEV->concat(vec_ins, in_x);
        DEV->FMaxPooling(vec_ins, val, index.get_ptr());
    }

    inline void backward() {
        int nSize = ins.size();
        vector<LDG::PTensor> vec_in_loss;
        for (int i = 0; i < nSize; i++) {
            vec_in_loss.push_back(&ins[i]->loss);
        }
        // gradient flows only to the argmax inputs recorded in `index`
        DEV->DMaxPooling(loss, vec_in_loss, index.get_ptr());
    }
};

// Element-wise average over the input vectors.
class AvgPoolNode : public PoolNode {
  public:
    AvgPoolNode() : PoolNode() {
        node_type = "avg-pooling";
    }

  public:
    //Be careful that the row is the dim of input vector, and the col is the number of input vectors
    //Another point is that we change the input vectors directly.
    inline void compute() {
        int nSize = ins.size();
        vector<LDG::PTensor> vec_ins;
        for (int i = 0; i < nSize; ++i) {
            vec_ins.push_back(&ins[i]->val);
        }
        DEV->FAvgPooling(vec_ins, val);
    }

    inline void backward() {
        int nSize = ins.size();
        vector<LDG::PTensor> vec_ins_loss;
        for (int i = 0; i < nSize; i++) {
            vec_ins_loss.push_back(&ins[i]->loss);
        }
        DEV->DAvgPooling(loss, vec_ins_loss);
    }
};

// Element-wise sum over the input vectors.
class SumPoolNode : public PoolNode {
  public:
    SumPoolNode() : PoolNode() {
        node_type = "sum-pooling";
    }

  public:
    //Be careful that the row is the dim of input vector, and the col is the number of input vectors
    //Another point is that we change the input vectors directly.
    inline void compute() {
        int nSize = ins.size();
        vector<LDG::PTensor> vec_ins;
        for (int i = 0; i < nSize; ++i) {
            vec_ins.push_back(&ins[i]->val);
        }
        DEV->FSumPooling(vec_ins, val);
    }

    inline void backward() {
        int nSize = ins.size();
        vector<LDG::PTensor> vec_ins_loss;
        for (int i = 0; i < nSize; i++) {
            vec_ins_loss.push_back(&ins[i]->loss);
        }
        DEV->DSumPooling(loss, vec_ins_loss);
    }
};

// Element-wise min over the input vectors; mirrors MaxPoolNode, with
// `index` recording the argmin positions for the backward pass.
class MinPoolNode : public PoolNode {
  public:
    IndexPtr index;

    MinPoolNode() : PoolNode() {
        node_type = "min-pooling";
    }

    void init(int ndim, dtype dropout) {
        Node::init(ndim, dropout);
        DEV->init_index_ptr(index, ndim);
    }

    ~MinPoolNode(){
    }

  public:
    //Be careful that the row is the dim of input vector, and the col is the number of input vectors
    //Another point is that we change the input vectors directly.
    inline void compute() {
        int nSize = ins.size();
        vector<LDG::PTensor> vec_ins;
        for (int i = 0; i < nSize; ++i) {
            vec_ins.push_back(&ins[i]->val);
        }
        DEV->FMinPooling(vec_ins, val, index.get_ptr());
    }

    inline void backward() {
        int nSize = ins.size();
        vector<LDG::PTensor> vec_in_loss;
        for (int i = 0; i < nSize; i++) {
            vec_in_loss.push_back(&ins[i]->loss);
        }
        DEV->DMinPooling(loss, vec_in_loss, index.get_ptr());
    }
};

//#if USE_GPU
//class PoolExecute : public Execute {
//public:
//  bool bTrain;
//public:
//  inline void forward() {
//    int count = batch.size();
//    for (int idx = 0; idx < count; idx++) {
//      PoolNode* ptr = (PoolNode*)batch[idx];
//      ptr->compute();
//      ptr->forward_drop(bTrain);
//    }
//  }
//
//  inline void backward() {
//    int count = batch.size();
//    for (int idx = 0; idx < count; idx++) {
//      PoolNode* ptr = (PoolNode*)batch[idx];
//      ptr->backward_drop();
//      ptr->backward();
//    }
//  }
//};
//
//inline PExecute PoolNode::generate(bool bTrain) {
//  PoolExecute* exec = new PoolExecute();
//  exec->batch.push_back(this);
//  exec->bTrain = bTrain;
//  return exec;
//}
//#else
// Batched executor for pooling nodes: runs compute()/backward() for every
// node in `batch` and applies dropout over the collected outputs.
// NOTE(review): `drop_value` and `mask` are presumably inherited from
// Execute — confirm against Node.h.
class PoolExecute : public Execute {
  public:
    bool bTrain;
    vector<LDG::PTensor> vec_val;
    vector<LDG::PTensor> vec_loss;

  public:
    inline void forward() {
        int count = batch.size();
        //#pragma omp parallel for schedule(static,1)
        // drop_value is taken from the first node; the loop variable below
        // intentionally shadows this outer `ptr`
        PoolNode* ptr = (PoolNode*)batch[0];
        drop_value = ptr->drop_value;
        vec_val.resize(count);
        for (int idx = 0; idx < count; idx++) {
            PoolNode* ptr = (PoolNode*)batch[idx];
            vec_val[idx] = (&ptr->val);
            ptr->compute();
            ptr->degree = -1;
            //ptr->forward_drop(bTrain);
        }
        // dropout is applied in-place over all pooled outputs at once;
        // at inference time the no-mask overload is used
        if(drop_value > 0) {
            if(bTrain)
                DEV->Fdropout(vec_val, drop_value, mask, vec_val);
            else
                DEV->Fdropout(vec_val, drop_value, vec_val);
        }
    }

    inline void backward() {
        int count = batch.size();
        //#pragma omp parallel for schedule(static,1)
        vec_loss.resize(count);
        for (int idx = 0; idx < count; idx++) {
            PoolNode* ptr = (PoolNode*)batch[idx];
            vec_loss[idx] = (&ptr->loss);
        }
        // undo dropout on the incoming gradients before per-node backward
        if (drop_value > 0) {
            DEV->Ddropout(vec_loss, mask);
        }
        for (int idx = 0; idx < count; idx++) {
            PoolNode* ptr = (PoolNode*)batch[idx];
            //ptr->backward_drop();
            ptr->backward();
        }
    }
};

inline PExecute PoolNode::generate(bool bTrain) {
    PoolExecute* exec = new PoolExecute();
    exec->batch.push_back(this);
    exec->bTrain = bTrain;
    return exec;
}
//#endif

#endif
6267.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[2000 + 0][2600 + 0], double ey[2000 + 0][2600 + 0], double hz[2000 + 0][2600 + 0], double _fict_[1000 + 0]) { int t10; int t8; int t6; int t4; int t2; for (t2 = 0; t2 <= tmax - 1; t2 += 1) { for (t4 = 0; t4 <= ny - 1; t4 += 1) ey[0][t4] = _fict_[t2]; #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 1; t4 <= nx - 1; t4 += 16) for (t6 = t4; t6 <= (t4 + 15 < nx - 1 ? t4 + 15 : nx - 1); t6 += 1) for (t8 = 0; t8 <= ny - 1; t8 += 64) for (t10 = t8; t10 <= (ny - 1 < t8 + 63 ? ny - 1 : t8 + 63); t10 += 1) ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]); #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 0; t4 <= nx - 1; t4 += 16) for (t6 = t4; t6 <= (t4 + 15 < nx - 1 ? t4 + 15 : nx - 1); t6 += 1) for (t8 = 1; t8 <= ny - 1; t8 += 64) for (t10 = t8; t10 <= (ny - 1 < t8 + 63 ? ny - 1 : t8 + 63); t10 += 1) ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]); #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 0; t4 <= nx - 2; t4 += 16) for (t6 = t4; t6 <= (t4 + 15 < nx - 2 ? t4 + 15 : nx - 2); t6 += 1) for (t8 = 0; t8 <= ny - 2; t8 += 64) for (t10 = t8; t10 <= (ny - 2 < t8 + 63 ? ny - 2 : t8 + 63); t10 += 1) hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]); } }
3d7pt.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * Note: Y is modified in place to perform the carry.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }
  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: allocates two Nz x Ny x Nx grids (double-buffered in
 * A[0]/A[1]), runs a time-tiled 7-point Jacobi sweep TESTS times, and
 * reports per-run wall-clock time.
 * Usage: prog Nx Ny Nz [Nt]
 * NOTE(review): Nx/Ny/Nz (argc <= 3) and Nt (argc <= 4) are used
 * uninitialized when too few arguments are given. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    /* +2 adds a one-cell halo on each side of the domain */
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* A[t%2] is read and A[(t+1)%2] written at time step t */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 8;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;
  // initialize variables
  //
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2

/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
   Unicode 6.0. */
/* We do not support C11 <threads.h>. */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
/* Time-tiled sweep generated by PLUTO/CLooG: t1 iterates over time tiles,
   t2..t4 over the skewed space tiles (parallel across t2), t5 is the time
   step and t6..t8 the grid point; subscripts are un-skewed with (-t5+tX). */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,4);t1++) {
    lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
    ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(t1-1,2)),ceild(8*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(4*t1+Ny+5,8)),floord(8*t2+Ny+4,8)),floord(8*t1-8*t2+Nz+Ny+3,8));t3++) {
        for (t4=max(max(max(0,ceild(t1-255,256)),ceild(8*t2-Nz-1020,1024)),ceild(8*t3-Ny-1020,1024));t4<=min(min(min(min(floord(Nt+Nx-4,1024),floord(4*t1+Nx+5,1024)),floord(8*t2+Nx+4,1024)),floord(8*t3+Nx+4,1024)),floord(8*t1-8*t2+Nz+Nx+3,1024));t4++) {
          for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),8*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),8*t3+6),1024*t4+1022),8*t1-8*t2+Nz+5);t5++) {
            for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
                lbv=max(1024*t4,t5+1);
                ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  /* 7-point update: center scaled by alpha plus beta times
                     the six face neighbors, written to the other buffer */
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
critical2.c
// PASS: *
// RUN: ${CATO_ROOT}/src/scripts/cexecute_pass.py %s -o %t
// RUN: diff <(mpirun -np 4 %t) %s.reference_output

#include <omp.h>
#include <stdio.h>
#include <unistd.h>

// Staggers four threads with sleeps so they reach the critical section in a
// known order, then relies on the reference output to check that the
// critical section serializes them: if it did not, thread 1 would "overtake"
// thread 0 (and thread 3 would overtake thread 2).
int main() {
    int i;

#pragma omp parallel shared(i)
    {
        int tid = omp_get_thread_num();

        // stagger arrival: 0 first, then 1, then 2, then 3
        switch (tid) {
            case 0: sleep(0); break;
            case 1: sleep(1); break;
            case 2: sleep(4); break;
            case 3: sleep(5); break;
            default: break;
        }

        // ordering: 0,1, 2,3
        printf("Thread %i before critical\n", tid);

#pragma omp critical
        {
            // early arrivals hold the lock long enough for the next
            // thread to queue up behind them
            switch (tid) {
                case 0: sleep(2); break;
                case 1: usleep(10); break;
                case 2: sleep(2); break;
                case 3: usleep(10); break;
                default: break;
            }
        }

        // ordering: 0,1, 2,3
        // if critical does not work 1 will "overtake" 0 same for 3 and 2
        printf("Thread %i after critical\n", tid);
    }
}
GB_binop__ne_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).  Any fix belongs in the
// Generator sources, from which this file is regenerated.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__ne_int8)
// A.*B function (eWiseMult):       GB (_AemultB_08__ne_int8)
// A.*B function (eWiseMult):       GB (_AemultB_02__ne_int8)
// A.*B function (eWiseMult):       GB (_AemultB_04__ne_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__ne_int8)
// A*D function (colscale):         GB (_AxD__ne_int8)
// D*A function (rowscale):         GB (_DxB__ne_int8)
// C+=B function (dense accum):     GB (_Cdense_accumB__ne_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__ne_int8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__ne_int8)
// C=scalar+B                       GB (_bind1st__ne_int8)
// C=scalar+B'                     GB (_bind1st_tran__ne_int8)
// C=A+scalar                       GB (_bind2nd__ne_int8)
// C=A'+scalar                     GB (_bind2nd_tran__ne_int8)

// C type:   bool
// A type:   int8_t
// A pattern?  0
// B type:   int8_t
// B pattern?  0

// BinaryOp: cij = (aij != bij)

// The GB_* macros below configure the generic template files #include'd
// into each wrapper function; GBX/GBB are access macros from GB.h
// (GBX reads an entry, iso-aware; GBB tests a bitmap entry — confirm in GB.h).

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x != y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_NE || GxB_NO_INT8 || GxB_NO_NE_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NE is none of those, so this variant is compiled out for this operator.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body compiled out: NE is not a valid accumulator for subassign 23
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__ne_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body compiled out: NE is not a valid accumulator for subassign 22
    #if 0
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__ne_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__ne_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__ne_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__ne_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in B's bitmap
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__ne_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in A's bitmap
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x != aij) ; \
}

GrB_Info GB (_bind1st_tran__ne_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code below
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij != y) ; \
}

GrB_Info GB (_bind2nd_tran__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
8ecbb58ff87b7b2f24572e1df17ad7c57ea1b5f1.c
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "omp.h"

// Devito-style generated code: dataobj wraps a flat device-mappable array
// plus its padded/unpadded shape metadata.
struct dataobj
{
  void *restrict data;
  int * size;
  int * npsize;
  int * dsize;
  int * hsize;
  int * hofs;
  int * oofs;
} ;

// Per-section wall-clock accumulators filled by the generated kernels.
struct profiler
{
  double section0;
} ;

/*
 * Fill the absorbing-boundary (abc_*) halo planes of `delta` by copying
 * fixed interior source planes, offloading each sweep to the target device.
 *
 * Parameters: [x|y|z]_m/[x|y|z]_M are the inclusive interior loop bounds;
 * abc_*_ltkn / abc_*_rtkn are the left/right halo thicknesses per axis;
 * timers->section0 accumulates the elapsed time of the padding section.
 * Returns 0 always.
 *
 * NOTE(review): the source planes (index 12, and x_M-8 / y_M-8 / z_M-8,
 * all with the +2 array padding applied) are constants chosen by the code
 * generator — presumably mirroring a 10-point interior plane; confirm
 * against the Devito operator that emitted this file.
 */
int padfunc(struct dataobj *restrict delta_vec, const int x_M, const int y_M, const int z_M, const int abc_x_l_ltkn, const int abc_x_r_rtkn, const int abc_y_l_ltkn, const int abc_y_r_rtkn, const int abc_z_l_ltkn, const int abc_z_r_rtkn, struct profiler * timers, const int x_m, const int y_m, const int z_m)
{
  // Reinterpret the flat buffer as a 3-D VLA-typed array for indexing.
  float (*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[delta_vec->size[1]][delta_vec->size[2]]) delta_vec->data;

  // Copy the whole array to the device for the duration of the section.
  #pragma omp target enter data map(to: delta[0:delta_vec->size[0]][0:delta_vec->size[1]][0:delta_vec->size[2]])

  struct timeval start_section0, end_section0;
  gettimeofday(&start_section0, NULL);
  /* Begin section0 */
  // Left x halo: replicate plane 12 into each left boundary plane.
  for (int abc_x_l = x_m; abc_x_l <= abc_x_l_ltkn + x_m - 1; abc_x_l += 1)
  {
    #pragma omp target teams distribute parallel for collapse(2)
    for (int y = y_m; y <= y_M; y += 1)
    {
      for (int z = z_m; z <= z_M; z += 1)
      {
        delta[abc_x_l + 2][y + 2][z + 2] = delta[12][y + 2][z + 2];
      }
    }
  }
  // Right x halo: replicate plane x_M-8 into each right boundary plane.
  for (int abc_x_r = -abc_x_r_rtkn + x_M + 1; abc_x_r <= x_M; abc_x_r += 1)
  {
    #pragma omp target teams distribute parallel for collapse(2)
    for (int y = y_m; y <= y_M; y += 1)
    {
      for (int z = z_m; z <= z_M; z += 1)
      {
        delta[abc_x_r + 2][y + 2][z + 2] = delta[x_M - 8][y + 2][z + 2];
      }
    }
  }
  // y and z halos, swept per interior x slice.
  #pragma omp target teams distribute parallel for collapse(1)
  for (int x = x_m; x <= x_M; x += 1)
  {
    for (int abc_y_l = y_m; abc_y_l <= abc_y_l_ltkn + y_m - 1; abc_y_l += 1)
    {
      for (int z = z_m; z <= z_M; z += 1)
      {
        delta[x + 2][abc_y_l + 2][z + 2] = delta[x + 2][12][z + 2];
      }
    }
    for (int abc_y_r = -abc_y_r_rtkn + y_M + 1; abc_y_r <= y_M; abc_y_r += 1)
    {
      for (int z = z_m; z <= z_M; z += 1)
      {
        delta[x + 2][abc_y_r + 2][z + 2] = delta[x + 2][y_M - 8][z + 2];
      }
    }
    for (int y = y_m; y <= y_M; y += 1)
    {
      for (int abc_z_l = z_m; abc_z_l <= abc_z_l_ltkn + z_m - 1; abc_z_l += 1)
      {
        delta[x + 2][y + 2][abc_z_l + 2] = delta[x + 2][y + 2][12];
      }
      for (int abc_z_r = -abc_z_r_rtkn + z_M + 1; abc_z_r <= z_M; abc_z_r += 1)
      {
        delta[x + 2][y + 2][abc_z_r + 2] = delta[x + 2][y + 2][z_M - 8];
      }
    }
  }
  /* End section0 */
  gettimeofday(&end_section0, NULL);
  timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;

  // Copy results back to the host, then release the device mapping.
  #pragma omp target update from(delta[0:delta_vec->size[0]][0:delta_vec->size[1]][0:delta_vec->size[2]])
  #pragma omp target exit data map(release: delta[0:delta_vec->size[0]][0:delta_vec->size[1]][0:delta_vec->size[2]])

  return 0;
}
DemBonesExt.h
///////////////////////////////////////////////////////////////////////////////
//               Dem Bones - Skinning Decomposition Library                  //
//         Copyright (c) 2019, Electronic Arts. All rights reserved.         //
///////////////////////////////////////////////////////////////////////////////

#ifndef DEM_BONES_EXT
#define DEM_BONES_EXT

#include "DemBones.h"
#include <Eigen/Geometry>

#ifndef DEM_BONES_MAT_BLOCKS
#include "MatBlocks.h"
#define DEM_BONES_DEM_BONES_EXT_MAT_BLOCKS_UNDEFINED
#endif

namespace Dem
{

/** @class DemBonesExt DemBonesExt.h "DemBones/DemBonesExt.h"
    @brief Extended class to handle hierarchical skeleton with local rotations/translations and bind matrices

    @details Call computeRTB() to get local rotations/translations and bind matrices
    after skinning decomposition is done and other data is set.

    @b _Scalar is the floating-point data type. @b _AniMeshScalar is the floating-point data type of mesh sequence #v.
*/
template<class _Scalar, class _AniMeshScalar>
class DemBonesExt: public DemBones<_Scalar, _AniMeshScalar> {
public:
	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
	using MatrixX=Eigen::Matrix<_Scalar, Eigen::Dynamic, Eigen::Dynamic>;
	using Matrix4=Eigen::Matrix<_Scalar, 4, 4>;
	using Matrix3=Eigen::Matrix<_Scalar, 3, 3>;
	using VectorX=Eigen::Matrix<_Scalar, Eigen::Dynamic, 1>;
	using Vector4=Eigen::Matrix<_Scalar, 4, 1>;
	using Vector3=Eigen::Matrix<_Scalar, 3, 1>;
	using SparseMatrix=Eigen::SparseMatrix<_Scalar>;
	using Triplet=Eigen::Triplet<_Scalar>;

	// Re-exported solver parameters and data from the DemBones base class.
	using DemBones<_Scalar, _AniMeshScalar>::nIters;
	using DemBones<_Scalar, _AniMeshScalar>::nInitIters;
	using DemBones<_Scalar, _AniMeshScalar>::nTransIters;
	using DemBones<_Scalar, _AniMeshScalar>::transAffine;
	using DemBones<_Scalar, _AniMeshScalar>::transAffineNorm;
	using DemBones<_Scalar, _AniMeshScalar>::nWeightsIters;
	using DemBones<_Scalar, _AniMeshScalar>::nnz;
	using DemBones<_Scalar, _AniMeshScalar>::weightsSmooth;
	using DemBones<_Scalar, _AniMeshScalar>::weightsSmoothStep;
	using DemBones<_Scalar, _AniMeshScalar>::weightEps;

	using DemBones<_Scalar, _AniMeshScalar>::nV;
	using DemBones<_Scalar, _AniMeshScalar>::nB;
	using DemBones<_Scalar, _AniMeshScalar>::nS;
	using DemBones<_Scalar, _AniMeshScalar>::nF;
	using DemBones<_Scalar, _AniMeshScalar>::fStart;
	using DemBones<_Scalar, _AniMeshScalar>::subjectID;
	using DemBones<_Scalar, _AniMeshScalar>::u;
	using DemBones<_Scalar, _AniMeshScalar>::w;
	using DemBones<_Scalar, _AniMeshScalar>::lockW;
	using DemBones<_Scalar, _AniMeshScalar>::m;
	using DemBones<_Scalar, _AniMeshScalar>::lockM;
	using DemBones<_Scalar, _AniMeshScalar>::v;
	using DemBones<_Scalar, _AniMeshScalar>::fv;
	using DemBones<_Scalar, _AniMeshScalar>::iter;
	using DemBones<_Scalar, _AniMeshScalar>::iterTransformations;
	using DemBones<_Scalar, _AniMeshScalar>:: iterWeights;

	//! Timestamps for bone transformations #m, [@c size] = #nS, #fTime(@p k) is the timestamp of frame @p k
	Eigen::VectorXd fTime;

	//! Name of bones, [@c size] = #nB, #boneName(@p j) is the name bone of @p j
	std::vector<std::string> boneName;

	//! Parent bone index, [@c size] = #nB, #parent(@p j) is the index of parent bone of @p j, #parent(@p j) = -1 if @p j has no parent.
	Eigen::VectorXi parent;

	//! Original bind pre-matrix, [@c size] = [4*#nS, 4*#nB], #bind.@a block(4*@p s, 4*@p j, 4, 4) is the global bind matrix of bone @p j on subject @p s at the rest pose
	MatrixX bind;

	//! Inverse pre-multiplication matrices, [@c size] = [4*#nS, 4*#nB], #preMulInv.@a block(4*@p s, 4*@p j, 4, 4) is the inverse of pre-local transformation of bone @p j on subject @p s
	MatrixX preMulInv;

	//! Rotation order, [@c size] = [3*#nS, #nB], #rotOrder.@a col(@p j).@a segment<3>(3*@p s) is the rotation order of bone @p j on subject @p s, 0=@c X, 1=@c Y, 2=@c Z, e.g. {0, 1, 2} is @c XYZ order
	Eigen::MatrixXi rotOrder;

	//! Orientations of bones, [@c size] = [3*#nS, #nB], @p orient.@a col(@p j).@a segment<3>(3*@p s) is the(@c rx, @c ry, @c rz) orientation of bone @p j in degree
	MatrixX orient;

	//! Bind transformation update, 0=keep original, 1=set translations to p-norm centroids (using #transAffineNorm) and rotations to identity, 2=do 1 and group joints
	int bindUpdate;

	/** @brief Constructor and setting default parameters
	*/
	DemBonesExt(): bindUpdate(0) {
		clear();
	}

	/** @brief Clear all data
	*/
	void clear() {
		fTime.resize(0);
		boneName.resize(0);
		parent.resize(0);
		bind.resize(0, 0);
		preMulInv.resize(0, 0);
		rotOrder.resize(0, 0);
		orient.resize(0, 0);
		DemBones<_Scalar, _AniMeshScalar>::clear();
	}

	/** @brief Local rotations, translations and global bind matrices of a subject
		@details Required all data in the base class: #u, #fv, #nV, #v, #nF, #fStart, #subjectID, #nS, #m, #w, #nB

		This function will initialize missing attributes:
		- #parent: -1 vector (if no joint grouping) or parent to a root, [@c size] = #nB
		- #preMulInv: 4*4 identity matrix blocks, [@c size] = [4*#nS, 4*#nB]
		- #rotOrder: {0, 1, 2} vector blocks, [@c size] = [3*#nS, #nB]
		- #orient: 0 matrix, [@c size] = [3*#nS, #nB]

		@param[in] s is the subject index
		@param[out] lr is the [3*@p nFr, #nB] by-reference output local rotations, @p lr.@a col(@p j).segment<3>(3*@p k) is the (@c rx, @c ry, @c rz) of bone @p j at frame @p k
		@param[out] lt is the [3*@p nFr, #nB] by-reference output local translations, @p lt.@a col(@p j).segment<3>(3*@p k) is the (@c tx, @c ty, @c tz) of bone @p j at frame @p k
		@param[out] gb is the [4, 4*#nB] by-reference output global bind matrices, @p gb.@a block(0, 4*@p j, 4, 4) is the bind matrix of bone j
		@param[out] lbr is the [3, #nB] by-reference output local rotations at bind pose @p lbr.@a col(@p j).segment<3>(3*@p k) is the (@c rx, @c ry, @c rz) of bone @p j
		@param[out] lbt is the [3, #nB] by-reference output local translations at bind pose, @p lbt.@a col(@p j).segment<3>(3*@p k) is the (@c tx, @c ty, @c tz) of bone @p j
		@param[in] degreeRot=true will output rotations in degree, otherwise output in radian
	*/
	void computeRTB(int s, MatrixX& lr, MatrixX& lt, MatrixX& gb, MatrixX& lbr, MatrixX& lbt, bool degreeRot=true) {
		computeBind(s, gb);

		// Default any skeleton attributes the caller did not provide.
		if (parent.size()==0) {
			if (bindUpdate==2) {
				int root=computeRoot();
				parent=Eigen::VectorXi::Constant(nB, root);
				parent(root)=-1;
			} else parent=Eigen::VectorXi::Constant(nB, -1);
		}
		if (preMulInv.size()==0) preMulInv=MatrixX::Identity(4, 4).replicate(nS, nB);
		if (rotOrder.size()==0) rotOrder=Eigen::Vector3i(0, 1, 2).replicate(nS, nB);
		if (orient.size()==0) orient=MatrixX::Zero(3*nS, nB);

		int nFs=fStart(s+1)-fStart(s);
		lr.resize(nFs*3, nB);
		lt.resize(nFs*3, nB);
		lbr.resize(3, nB);
		lbt.resize(3, nB);

		// NOTE(review): this function-scope lm is never used; it is shadowed
		// by the per-bone Matrix4 lm inside the loop below — likely a leftover.
		MatrixX lm(4*nFs, 4*nB);

		// Bones are independent, so the per-bone decomposition runs in parallel.
		#pragma omp parallel for
		for (int j=0; j<nB; j++) {
			Eigen::Vector3i ro=rotOrder.col(j).template segment<3>(s*3);
			// Inverse orientation matrix built from the per-bone joint orientation.
			Vector3 ov=orient.vec3(s, j)*EIGEN_PI/180;
			Matrix3 invOM=Matrix3(Eigen::AngleAxis<_Scalar>(ov(ro(2)), Vector3::Unit(ro(2))))*
				Eigen::AngleAxis<_Scalar>(ov(ro(1)), Vector3::Unit(ro(1)))*
				Eigen::AngleAxis<_Scalar>(ov(ro(0)), Vector3::Unit(ro(0)));
			invOM.transposeInPlace();

			// Local bind matrix: relative to parent's global bind if any.
			Matrix4 lb;
			if (parent(j)==-1) lb=preMulInv.blk4(s, j)*gb.blk4(0, j);
			else lb=preMulInv.blk4(s, j)*gb.blk4(0, parent(j)).inverse()*gb.blk4(0, j);
			Vector3 curRot=Vector3::Zero();
			toRot(invOM*lb.template topLeftCorner<3, 3>(), curRot, ro);
			lbr.col(j)=curRot;
			lbt.col(j)=lb.template topRightCorner<3, 1>();

			// Per-frame local matrices; curRot is carried frame-to-frame so
			// toRot picks the Euler solution closest to the previous frame.
			Matrix4 lm;
			for (int k=0; k<nFs; k++) {
				if (parent(j)==-1) lm=preMulInv.blk4(s, j)*m.blk4(k+fStart(s), j)*gb.blk4(0, j);
				else lm=preMulInv.blk4(s, j)*(m.blk4(k+fStart(s), parent(j))*gb.blk4(0, parent(j))).inverse()*m.blk4(k+fStart(s), j)*gb.blk4(0, j);
				toRot(invOM*lm.template topLeftCorner<3, 3>(), curRot, ro);
				lr.vec3(k, j)=curRot;
				lt.vec3(k, j)=lm.template topRightCorner<3, 1>();
			}
		}

		if (degreeRot) {
			lr*=180/EIGEN_PI;
			lbr*=180/EIGEN_PI;
		}
	}

private:
	/** p-norm centroids (using #transAffineNorm) and rotations to identity
		@param s is the subject index
		@param b is the [4, 4*#nB] by-reference output global bind matrices, #b.#a block(0, 4*@p j, 4, 4) is the bind matrix of bone @p j
	*/
	void computeCentroids(int s, MatrixX& b) {
		// Accumulate weight^p-weighted homogeneous vertex positions per bone.
		MatrixX c=MatrixX::Zero(4, nB);
		for (int i=0; i<nV; i++)
			for (typename SparseMatrix::InnerIterator it(w, i); it; ++it)
				c.col(it.row())+=pow(it.value(), transAffineNorm)*u.vec3(s, i).homogeneous();
		// Normalize; skip bones with zero total weight or locked transforms.
		for (int j=0; j<nB; j++)
			if ((c(3, j)!=0)&&(lockM(j)==0)) b.transVec(0, j)=c.col(j).template head<3>()/c(3, j);
	}

	/** Global bind pose
		@param s is the subject index
		@param bindUpdate is the type of bind pose update, 0=keep original, 1 or 2=set translations to p-norm centroids (using #transAffineNorm) and rotations to identity
		@param b is the the [4, 4*#nB] by-reference output global bind matrices, #b.#a block(0, 4*@p j, 4, 4) is the bind matrix of bone @p j
	*/
	void computeBind(int s, MatrixX& b) {
		// Lazily initialize #bind (identity rotation + centroid translation).
		if (bind.size()==0) {
			lockM=Eigen::VectorXi::Zero(nB);
			bind.resize(nS*4, nB*4);
			for (int k=0; k<nS; k++) {
				b=MatrixX::Identity(4, 4).replicate(1, nB);
				computeCentroids(k, b);
				bind.block(4*k, 0, 4, 4*nB)=b;
			}
		}
		b=bind.block(4*s, 0, 4, 4*nB);
		if (bindUpdate>=1) computeCentroids(s, b);
	}

	/** Root joint: the bone whose global transform alone best explains the
		whole animated mesh (minimum total squared reconstruction error).
	*/
	int computeRoot() {
		VectorX err(nB);
		#pragma omp parallel for
		for (int j=0; j<nB; j++) {
			double ej=0;
			for (int i=0; i<nV; i++)
				for (int k=0; k<nF; k++)
					ej+=(m.rotMat(k, j)*u.vec3(subjectID(k), i)+m.transVec(k, j)-v.vec3(k, i).template cast<_Scalar>()).squaredNorm();
			err(j)=ej;
		}
		int rj;
		err.minCoeff(&rj);
		return rj;
	}

	/** Euler angles from rotation matrix
		@param rMat is the 3*3 rotation matrix
		@param curRot is the input current Euler angles, it is also the by-reference output closet Euler angles correspond to @p rMat
		@param ro is the rotation order, 0=@c X, 1=@c Y, 2=@c Z, e.g. {0, 1, 2} is @c XYZ order
		@param eps is the epsilon
	*/
	void toRot(const Matrix3& rMat, Vector3& curRot, const Eigen::Vector3i& ro, _Scalar eps=_Scalar(1e-10)) {
		// Start from Eigen's canonical decomposition, then search sign flips
		// and +/- 2*pi offsets for the equivalent solution nearest curRot.
		Vector3 r0=rMat.eulerAngles(ro(2), ro(1), ro(0)).reverse();
		_Scalar gMin=(r0-curRot).squaredNorm();
		Vector3 rMin=r0;
		Vector3 r;
		Matrix3 tmpMat;
		for (int fx=-1; fx<=1; fx+=2)
			for (_Scalar sx=-2*EIGEN_PI; sx<2.1*EIGEN_PI; sx+=EIGEN_PI) {
				r(0)=fx*r0(0)+sx;
				for (int fy=-1; fy<=1; fy+=2)
					for (_Scalar sy=-2*EIGEN_PI; sy<2.1*EIGEN_PI; sy+=EIGEN_PI) {
						r(1)=fy*r0(1)+sy;
						for (int fz=-1; fz<=1; fz+=2)
							for (_Scalar sz=-2*EIGEN_PI; sz<2.1*EIGEN_PI; sz+=EIGEN_PI) {
								r(2)=fz*r0(2)+sz;
								tmpMat=Matrix3(Eigen::AngleAxis<_Scalar>(r(ro(2)), Vector3::Unit(ro(2))))*
									Eigen::AngleAxis<_Scalar>(r(ro(1)), Vector3::Unit(ro(1)))*
									Eigen::AngleAxis<_Scalar>(r(ro(0)), Vector3::Unit(ro(0)));
								// Accept candidates that reproduce rMat within eps.
								if ((tmpMat-rMat).squaredNorm()<eps) {
									_Scalar tmp=(r-curRot).squaredNorm();
									if (tmp<gMin) {
										gMin=tmp;
										rMin=r;
									}
								}
							}
					}
			}
		curRot=rMin;
	}
};

}

#ifdef DEM_BONES_DEM_BONES_EXT_MAT_BLOCKS_UNDEFINED
#undef blk4
#undef rotMat
#undef transVec
#undef vec3
#undef DEM_BONES_MAT_BLOCKS
#endif

#undef rotMatFromEuler

#endif
blas_server_omp.c
/*********************************************************************/ /* Copyright 2009, 2010 The University of Texas at Austin. */ /* All rights reserved. */ /* */ /* Redistribution and use in source and binary forms, with or */ /* without modification, are permitted provided that the following */ /* conditions are met: */ /* */ /* 1. Redistributions of source code must retain the above */ /* copyright notice, this list of conditions and the following */ /* disclaimer. */ /* */ /* 2. Redistributions in binary form must reproduce the above */ /* copyright notice, this list of conditions and the following */ /* disclaimer in the documentation and/or other materials */ /* provided with the distribution. */ /* */ /* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ /* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ /* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ /* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ /* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ /* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ /* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ /* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ /* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ /* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ /* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ /* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. */ /* */ /* The views and conclusions contained in the software and */ /* documentation are those of the authors and should not be */ /* interpreted as representing official policies, either expressed */ /* or implied, of The University of Texas at Austin. 
*/
/*********************************************************************/

#include <stdio.h>
#include <stdlib.h>
//#include <sys/mman.h>
#include "common.h"

#ifndef USE_OPENMP

#include "blas_server.c"

#else

// OpenMP-backed BLAS thread server: instead of a pthread pool, work items
// are executed on OpenMP threads; each thread owns a preallocated buffer.

int blas_server_avail = 0;

// Per-OpenMP-thread scratch buffers, indexed by omp_get_thread_num().
static void * blas_thread_buffer[MAX_CPU_NUMBER];

// Set the active thread count (clamped to [1, MAX_CPU_NUMBER]) and
// grow/shrink the per-thread buffer pool to match.
void goto_set_num_threads(int num_threads) {

	int i=0;

	if (num_threads < 1) num_threads = blas_num_threads;

	if (num_threads > MAX_CPU_NUMBER) num_threads = MAX_CPU_NUMBER;

	if (num_threads > blas_num_threads) {
		blas_num_threads = num_threads;
	}

	blas_cpu_number  = num_threads;

	omp_set_num_threads(blas_cpu_number);

	//adjust buffer for each thread
	for(i=0; i<blas_cpu_number; i++){
		if(blas_thread_buffer[i]==NULL){
			blas_thread_buffer[i]=blas_memory_alloc(2);
		}
	}
	for(; i<MAX_CPU_NUMBER; i++){
		if(blas_thread_buffer[i]!=NULL){
			blas_memory_free(blas_thread_buffer[i]);
			blas_thread_buffer[i]=NULL;
		}
	}

#if defined(ARCH_MIPS64)
	//set parameters for different number of threads.
	blas_set_parameter();
#endif

}

// Public alias for goto_set_num_threads().
void openblas_set_num_threads(int num_threads) {

	goto_set_num_threads(num_threads);
}

// Allocate one scratch buffer per configured thread; mark server available.
int blas_thread_init(void){

	int i=0;
	blas_get_cpu_number();

	blas_server_avail = 1;

	for(i=0; i<blas_num_threads; i++){
		blas_thread_buffer[i]=blas_memory_alloc(2);
	}
	for(; i<MAX_CPU_NUMBER; i++){
		blas_thread_buffer[i]=NULL;
	}

	return 0;
}

// Release all scratch buffers and mark the server unavailable.
int BLASFUNC(blas_thread_shutdown)(void){

	int i=0;
	blas_server_avail = 0;

	for(i=0; i<MAX_CPU_NUMBER; i++){
		if(blas_thread_buffer[i]!=NULL){
			blas_memory_free(blas_thread_buffer[i]);
			blas_thread_buffer[i]=NULL;
		}
	}

	return 0;
}

// Dispatch a legacy-interface kernel: decode real/complex and precision
// from the mode bits, cast func to the matching signature, and call it.
static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){

	if (!(mode & BLAS_COMPLEX)){
#ifdef EXPRECISION
		if (mode & BLAS_XDOUBLE){
			/* REAL / Extended Double */
			void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble,
				xdouble *, BLASLONG, xdouble *, BLASLONG,
				xdouble *, BLASLONG, void *) = func;

			afunc(args -> m, args -> n, args -> k,
				((xdouble *)args -> alpha)[0],
				args -> a, args -> lda,
				args -> b, args -> ldb,
				args -> c, args -> ldc, sb);
		} else
#endif
		if (mode & BLAS_DOUBLE){
			/* REAL / Double */
			void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double,
				double *, BLASLONG, double *, BLASLONG,
				double *, BLASLONG, void *) = func;

			afunc(args -> m, args -> n, args -> k,
				((double *)args -> alpha)[0],
				args -> a, args -> lda,
				args -> b, args -> ldb,
				args -> c, args -> ldc, sb);
		} else {
			/* REAL / Single */
			void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float,
				float *, BLASLONG, float *, BLASLONG,
				float *, BLASLONG, void *) = func;

			afunc(args -> m, args -> n, args -> k,
				((float *)args -> alpha)[0],
				args -> a, args -> lda,
				args -> b, args -> ldb,
				args -> c, args -> ldc, sb);
		}
	} else {
#ifdef EXPRECISION
		if (mode & BLAS_XDOUBLE){
			/* COMPLEX / Extended Double */
			void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble,
				xdouble *, BLASLONG, xdouble *, BLASLONG,
				xdouble *, BLASLONG, void *) = func;

			afunc(args -> m, args -> n, args -> k,
				((xdouble *)args -> alpha)[0],
				((xdouble *)args -> alpha)[1],
				args -> a, args -> lda,
				args -> b, args -> ldb,
				args -> c, args -> ldc, sb);
		} else
#endif
		if (mode & BLAS_DOUBLE){
			/* COMPLEX / Double */
			void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double,
				double *, BLASLONG, double *, BLASLONG,
				double *, BLASLONG, void *) = func;

			afunc(args -> m, args -> n, args -> k,
				((double *)args -> alpha)[0],
				((double *)args -> alpha)[1],
				args -> a, args -> lda,
				args -> b, args -> ldb,
				args -> c, args -> ldc, sb);
		} else {
			/* COMPLEX / Single */
			void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float,
				float *, BLASLONG, float *, BLASLONG,
				float *, BLASLONG, void *) = func;

			afunc(args -> m, args -> n, args -> k,
				((float *)args -> alpha)[0],
				((float *)args -> alpha)[1],
				args -> a, args -> lda,
				args -> b, args -> ldb,
				args -> c, args -> ldc, sb);
		}
	}
}

// Run one queued work item on the calling OpenMP thread, carving its
// sa/sb workspaces out of the per-thread buffer when not supplied.
static void exec_threads(blas_queue_t *queue){

	void *buffer, *sa, *sb;
	int pos=0, release_flag=0;

	buffer = NULL;
	sa = queue -> sa;
	sb = queue -> sb;

#ifdef CONSISTENT_FPCSR
	// Propagate the caller's SSE and x87 FP control state to this thread.
	__asm__ __volatile__ ("ldmxcsr %0" : : "m" (queue -> sse_mode));
	__asm__ __volatile__ ("fldcw %0"   : : "m" (queue -> x87_mode));
#endif

	if ((sa == NULL) && (sb == NULL) && ((queue -> mode & BLAS_PTHREAD) == 0)) {

		pos = omp_get_thread_num();
		buffer = blas_thread_buffer[pos];

		//fallback
		if(buffer==NULL) {
			buffer = blas_memory_alloc(2);
			release_flag=1;
		}

		if (sa == NULL) {
			sa = (void *)((BLASLONG)buffer + GEMM_OFFSET_A);
			queue->sa=sa;
		}

		if (sb == NULL) {
			// sb starts right after sa's panel, aligned per GEMM_ALIGN; the
			// panel size depends on the precision/domain encoded in mode.
			if (!(queue -> mode & BLAS_COMPLEX)){
#ifdef EXPRECISION
				if (queue -> mode & BLAS_XDOUBLE){
					sb = (void *)(((BLASLONG)sa + ((QGEMM_P * QGEMM_Q * sizeof(xdouble)
						+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
				} else
#endif
				if (queue -> mode & BLAS_DOUBLE){
					sb = (void *)(((BLASLONG)sa + ((DGEMM_P * DGEMM_Q * sizeof(double)
						+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
				} else {
					sb = (void *)(((BLASLONG)sa + ((SGEMM_P * SGEMM_Q * sizeof(float)
						+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
				}
			} else {
#ifdef EXPRECISION
				if (queue -> mode & BLAS_XDOUBLE){
					sb = (void *)(((BLASLONG)sa + ((XGEMM_P * XGEMM_Q * 2 * sizeof(xdouble)
						+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
				} else
#endif
				if (queue -> mode & BLAS_DOUBLE){
					sb = (void *)(((BLASLONG)sa + ((ZGEMM_P * ZGEMM_Q * 2 * sizeof(double)
						+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
				} else {
					sb = (void *)(((BLASLONG)sa + ((CGEMM_P * CGEMM_Q * 2 * sizeof(float)
						+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
				}
			}
			queue->sb=sb;
		}
	}

	if (queue -> mode & BLAS_LEGACY) {
		legacy_exec(queue -> routine, queue -> mode, queue -> args, sb);
	} else
		if (queue -> mode & BLAS_PTHREAD) {
			void (*pthreadcompat)(void *) = queue -> routine;
			(pthreadcompat)(queue -> args);
		} else {
			int (*routine)(blas_arg_t *, void *, void *, void *, void *, BLASLONG) = queue -> routine;

			(routine)(queue -> args, queue -> range_m, queue -> range_n, sa, sb, queue -> position);
		}

	// Only free the fallback buffer; pool buffers are reused across calls.
	if (release_flag) blas_memory_free(buffer);

}

// Execute num queued work items (definition continues beyond this chunk).
int exec_blas(BLASLONG num, blas_queue_t *queue){

	BLASLONG i;

	if ((num <= 0) || (queue == NULL)) return 0;

#ifdef CONSISTENT_FPCSR
	for (i = 0; i < num; i ++) {
__asm__ __volatile__ ("fnstcw %0" : "=m" (queue[i].x87_mode)); __asm__ __volatile__ ("stmxcsr %0" : "=m" (queue[i].sse_mode)); } #endif #pragma omp parallel for schedule(static) for (i = 0; i < num; i ++) { #ifndef USE_SIMPLE_THREADED_LEVEL3 queue[i].position = i; #endif exec_threads(&queue[i]); } return 0; } #endif
lu.pluto.par2d.c
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) double L[N][N]; double U[N][N]; double A[N][N+13]; void init_arrays() { int i, j, k; /* have to initialize this matrix properly to prevent * division by zero */ for (i=0; i<N; i++) { for (j=0; j<N; j++) { L[i][j] = 0.0; U[i][j] = 0.0; } } for (i=0; i<N; i++) { for (j=0; j<=i; j++) { L[i][j] = i+j+1; U[j][i] = i+j+1; } } for (i=0; i<N; i++) { for (j=0; j<N; j++) { for (k=0; k<N; k++) { A[i][j] += L[i][k]*U[k][j]; } } } } double rtclock() { struct timezone tzp; struct timeval tp; int stat; gettimeofday (&tp, &tzp); return (tp.tv_sec + tp.tv_usec*1.0e-6); } int main() { init_arrays(); double annot_t_start=0, annot_t_end=0, annot_t_total=0; int annot_i; omp_set_nested(1); omp_set_num_threads(2); for (annot_i=0; annot_i<REPS; annot_i++) { annot_t_start = rtclock(); register int i,j,k; #define S1(zT0,zT1,zT2,zT3,k,j) {A[k][j]=A[k][j]/A[k][k];} #define S2(zT0,zT1,zT2,zT3,zT4,zT5,k,i,j) {A[i][j]=A[i][j]-A[i][k]*A[k][j];} int c1, c2, c3, c4, c5, c6, c7, c8, c9; register int lb, ub, lb1, ub1, lb2, ub2; register int lbv, ubv; /* Generated from PLuTo-produced CLooG file by CLooG v0.14.1 64 bits in 2.21s. 
*/ for (c1=-2;c1<=floord(3*N-4,256);c1++) { lb1=max(max(0,ceild(256*c1-N-253,512)),ceild(256*c1-2*N+3,256)); ub1=min(floord(128*c1+255,128),floord(N-1,256)); #pragma omp parallel for shared(c1,lb1,ub1) private(lb2,ub2,c2,c3,c4,c5,c6,c7,c8,c9) for (c2=lb1; c2<=ub1; c2++) { lb2=max(max(max(ceild(256*c1-256*c2-N+2,256),ceild(128*c1-256*c2-127,128)),ceild(128*c1-128*c2-32385,32768)),ceild(128*c1-128*c2-127,256)); ub2=min(floord(N-1,256),floord(256*c1-256*c2+255,256)); #pragma omp parallel for shared(c1,c2,lb1,ub1,lb2,ub2) private(c3,c4,c5,c6,c7,c8,c9) for (c3=lb2; c3<=ub2; c3++) { for (c4=max(max(8*c1-8*c2-8*c3,0),8*c1-8*c2-1800*c3-1778);c4<=min(min(min(min(floord(3968*c3+3937,16),8*c1-8*c2-8*c3+7),floord(128*c2+127,16)),floord(N-2,32)),floord(128*c3+127,16));c4++) { for (c5=max(max(ceild(16*c4-15,16),0),8*c2);c5<=min(floord(N-1,32),8*c2+7);c5++) { for (c6=max(max(max(max(ceild(16*c4-465,496),ceild(8*c1-8*c2-16*c3-c4-217,223)),ceild(-8*c1+8*c2+16*c3+c4-217,225)),8*c3),ceild(16*c4-15,16));c6<=min(8*c3+7,floord(N-1,32));c6++) { if ((c1 == c2+2*c3) && (c4 == c6)) { for (c7=max(0,32*c6);c7<=min(min(32*c5+30,32*c6+30),N-2);c7++) { for (c8=max(c7+1,32*c5);c8<=min(32*c5+31,N-1);c8++) { if ((c1-c2)%2 == 0) { S1((c1-c2)/2,c2,c4,c5,c7,c8) ; } for (c9=c7+1;c9<=min(32*c6+31,N-1);c9++) { if ((c1-c2)%2 == 0) { if ((c1-c2)%2 == 0) { S2((c1-c2)/2,(c1-c2)/2,c2,c4,c4,c5,c7,c9,c8) ; } } } } } } for (c7=max(32*c4,0);c7<=min(min(32*c6-1,32*c5+30),32*c4+31);c7++) { /*@ begin Loop( transform UnrollJam(ufactor=8) for (c8=max(c7+1,32*c5);c8<=min(32*c5+31,N-1);c8++) transform Unroll(ufactor=8) for (c9=32*c6;c9<=min(N-1,32*c6+31);c9++) { S2(c1-c2-c3,c3,c2,c4,c6,c5,c7,c9,c8) ; } ) @*/{ for (c8 = max(c7 + 1, 32 * c5); c8 <= min(32 * c5 + 31, N - 1) - 7; c8 = c8 + 8) { for (c9 = 32 * c6; c9 <= min(N - 1, 32 * c6 + 31) - 7; c9 = c9 + 8) { S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, c9, c8); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 1), c8); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 2), c8); 
S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 3), c8); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 4), c8); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 5), c8); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 6), c8); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 7), c8); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, c9, (c8 + 1)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 1), (c8 + 1)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 2), (c8 + 1)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 3), (c8 + 1)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 4), (c8 + 1)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 5), (c8 + 1)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 6), (c8 + 1)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 7), (c8 + 1)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, c9, (c8 + 2)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 1), (c8 + 2)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 2), (c8 + 2)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 3), (c8 + 2)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 4), (c8 + 2)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 5), (c8 + 2)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 6), (c8 + 2)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 7), (c8 + 2)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, c9, (c8 + 3)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 1), (c8 + 3)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 2), (c8 + 3)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 3), (c8 + 3)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 4), (c8 + 3)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 5), (c8 + 3)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 6), (c8 + 3)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 7), (c8 + 3)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, c9, (c8 + 4)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 1), (c8 + 4)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 2), (c8 + 4)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 3), (c8 + 4)); S2(c1 - 
c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 4), (c8 + 4)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 5), (c8 + 4)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 6), (c8 + 4)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 7), (c8 + 4)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, c9, (c8 + 5)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 1), (c8 + 5)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 2), (c8 + 5)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 3), (c8 + 5)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 4), (c8 + 5)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 5), (c8 + 5)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 6), (c8 + 5)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 7), (c8 + 5)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, c9, (c8 + 6)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 1), (c8 + 6)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 2), (c8 + 6)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 3), (c8 + 6)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 4), (c8 + 6)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 5), (c8 + 6)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 6), (c8 + 6)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 7), (c8 + 6)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, c9, (c8 + 7)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 1), (c8 + 7)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 2), (c8 + 7)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 3), (c8 + 7)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 4), (c8 + 7)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 5), (c8 + 7)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 6), (c8 + 7)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 7), (c8 + 7)); } for (; c9 <= min(N - 1, 32 * c6 + 31); c9 = c9 + 1) { S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, c9, c8); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, c9, (c8 + 1)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, c9, (c8 + 2)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, c9, (c8 + 3)); S2(c1 - c2 - 
c3, c3, c2, c4, c6, c5, c7, c9, (c8 + 4)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, c9, (c8 + 5)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, c9, (c8 + 6)); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, c9, (c8 + 7)); } } for (; c8 <= min(32 * c5 + 31, N - 1); c8 = c8 + 1) { for (c9 = 32 * c6; c9 <= min(N - 1, 32 * c6 + 31) - 7; c9 = c9 + 8) { S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, c9, c8); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 1), c8); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 2), c8); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 3), c8); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 4), c8); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 5), c8); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 6), c8); S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, (c9 + 7), c8); } for (; c9 <= min(N - 1, 32 * c6 + 31); c9 = c9 + 1) S2(c1 - c2 - c3, c3, c2, c4, c6, c5, c7, c9, c8); } } /*@ end @*/ } if ((c1 == c2+2*c3) && (-c4 == -c6) && (c4 <= min(floord(N-33,32),floord(32*c5-1,32)))) { for (c8=max(32*c5,32*c4+32);c8<=min(N-1,32*c5+31);c8++) { if ((c1-c2)%2 == 0) { S1((c1-c2)/2,c2,c4,c5,32*c4+31,c8) ; } } } } } } } } } /* End of CLooG code */ annot_t_end = rtclock(); annot_t_total += annot_t_end - annot_t_start; } annot_t_total = annot_t_total / REPS; printf("%f\n", annot_t_total); return ((int) A[0][0]); }
convolutiondepthwise_3x3.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void convdw3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int g=0; g<group; g++) { Mat out = top_blob.channel(g); const float bias0 = bias ? 
bias[g] : 0.f; const float* kernel0 = kernel + g*9; float* outptr = out; float* outptr2 = outptr + outw; const float* img0 = bottom_blob.channel(g); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; #if __ARM_NEON float32x4_t _k012x = vld1q_f32(kernel0); float32x4_t _k345x = vld1q_f32(kernel0+3); float32x4_t _k678x = vld1q_f32(kernel0+6); _k012x = vsetq_lane_f32(0.f, _k012x, 3); _k345x = vsetq_lane_f32(0.f, _k345x, 3); _k678x = vsetq_lane_f32(0.f, _k678x, 3); float32x4_t _bias0 = vdupq_n_f32(bias0); #else const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; #endif // __ARM_NEON int i = 0; for (; i+1 < outh; i+=2) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r00n = vld1q_f32(r0 + 4); float32x4_t _r01 = vextq_f32(_r00, _r00n, 1); float32x4_t _r02 = vextq_f32(_r00, _r00n, 2); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r10n = vld1q_f32(r1 + 4); float32x4_t _r11 = vextq_f32(_r10, _r10n, 1); float32x4_t _r12 = vextq_f32(_r10, _r10n, 2); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r20n = vld1q_f32(r2 + 4); float32x4_t _r21 = vextq_f32(_r20, _r20n, 1); float32x4_t _r22 = vextq_f32(_r20, _r20n, 2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r30n = vld1q_f32(r3 + 4); float32x4_t _r31 = vextq_f32(_r30, _r30n, 1); float32x4_t _r32 = vextq_f32(_r30, _r30n, 2); float32x4_t _sum1 = vmulq_laneq_f32(_r00, _k012x, 0); float32x4_t _sum2 = vmlaq_laneq_f32(_bias0, _r01, _k012x, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k012x, 2); _sum2 = vfmaq_laneq_f32(_sum2, _r10, _k345x, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k345x, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r12, _k345x, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k678x, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r21, _k678x, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k678x, 2); 
float32x4_t _sum3 = vmulq_laneq_f32(_r10, _k012x, 0); float32x4_t _sum4 = vmlaq_laneq_f32(_bias0, _r11, _k012x, 1); _sum3 = vfmaq_laneq_f32(_sum3, _r12, _k012x, 2); _sum4 = vfmaq_laneq_f32(_sum4, _r20, _k345x, 0); _sum3 = vfmaq_laneq_f32(_sum3, _r21, _k345x, 1); _sum4 = vfmaq_laneq_f32(_sum4, _r22, _k345x, 2); _sum3 = vfmaq_laneq_f32(_sum3, _r30, _k678x, 0); _sum4 = vfmaq_laneq_f32(_sum4, _r31, _k678x, 1); _sum3 = vfmaq_laneq_f32(_sum3, _r32, _k678x, 2); _sum1 = vaddq_f32(_sum1, _sum2); _sum3 = vaddq_f32(_sum3, _sum4); vst1q_f32(outptr, _sum1); vst1q_f32(outptr2, _sum3); r0 += 4; r1 += 4; r2 += 4; r3 += 4; outptr += 4; outptr2 += 4; } #else if (nn > 0) { asm volatile( "pld [%3, #192] \n" "vld1.f32 {d18-d20}, [%3 :64] \n"// r0 "add %3, #16 \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "0: \n" "vmul.f32 q7, q9, %e14[0] \n" "vand q13, %q17, %q17 \n"// q13 = _bias0 "vmul.f32 q6, q11, %e14[1] \n" "vmla.f32 q13, q12, %f14[0] \n" "pld [%4, #192] \n" "vld1.f32 {d18-d20}, [%4] \n"// r1 "add %4, #16 \n" "vmla.f32 q7, q9, %e15[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q6, q11, %e15[1] \n" "vmla.f32 q13, q12, %f15[0] \n" "vmul.f32 q8, q9, %e14[0] \n" "vand q15, %q17, %q17 \n"// q15 = _bias0 "vmul.f32 q14, q11, %e14[1] \n" "vmla.f32 q15, q12, %f14[0] \n" "pld [%5, #192] \n" "vld1.f32 {d18-d20}, [%5 :64] \n"// r2 "add %5, #16 \n" "vmla.f32 q7, q9, %e16[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q6, q11, %e16[1] \n" "vmla.f32 q13, q12, %f16[0] \n" "vmla.f32 q8, q9, %e15[0] \n" "vmla.f32 q14, q11, %e15[1] \n" "vmla.f32 q15, q12, %f15[0] \n" "pld [%6, #192] \n" "vld1.f32 {d18-d20}, [%6] \n"// r3 "add %6, #16 \n" "vmla.f32 q8, q9, %e16[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q14, q11, %e16[1] \n" "vmla.f32 q15, q12, %f16[0] \n" "vadd.f32 q7, q7, q6 \n" "pld [%3, #192] \n" "vld1.f32 {d18-d20}, [%3 :64] \n"// r0 "vadd.f32 q8, q8, q14 \n" "vadd.f32 q7, q7, q13 \n" 
"vadd.f32 q8, q8, q15 \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "add %3, #16 \n" "vst1.f32 {d14-d15}, [%1]! \n" "vst1.f32 {d16-d17}, [%2]! \n" "subs %0, #1 \n" "bne 0b \n" "sub %3, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(outptr2), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3) // %6 : "0"(nn), "1"(outptr), "2"(outptr2), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "w"(_k012x), // %14 "w"(_k345x), // %15 "w"(_k678x), // %16 "w"(_bias0) // %17 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _sum = vmulq_f32(_r00, _k012x); _sum = vmlaq_f32(_sum, _r10, _k345x); _sum = vmlaq_f32(_sum, _r20, _k678x); float32x4_t _sum2 = vmulq_f32(_r10, _k012x); _sum2 = vmlaq_f32(_sum2, _r20, _k345x); _sum2 = vmlaq_f32(_sum2, _r30, _k678x); _sum = vsetq_lane_f32(bias0, _sum, 3); _sum2 = vsetq_lane_f32(bias0, _sum2, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); *outptr2 = vaddvq_f32(_sum2); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2)); float32x2_t _sss2 = vpadd_f32(_ss, _ss2); *outptr = vget_lane_f32(_sss2, 0); *outptr2 = vget_lane_f32(_sss2, 1); #endif // __aarch64__ #else float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; float sum2 = bias0; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; *outptr = sum; *outptr2 = sum2; #endif r0++; r1++; r2++; r3++; 
outptr++; outptr2++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr += outw; outptr2 += outw; } for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r00n = vld1q_f32(r0 + 4); float32x4_t _r01 = vextq_f32(_r00, _r00n, 1); float32x4_t _r02 = vextq_f32(_r00, _r00n, 2); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r10n = vld1q_f32(r1 + 4); float32x4_t _r11 = vextq_f32(_r10, _r10n, 1); float32x4_t _r12 = vextq_f32(_r10, _r10n, 2); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r20n = vld1q_f32(r2 + 4); float32x4_t _r21 = vextq_f32(_r20, _r20n, 1); float32x4_t _r22 = vextq_f32(_r20, _r20n, 2); float32x4_t _sum1 = vmulq_laneq_f32(_r00, _k012x, 0); float32x4_t _sum2 = vmulq_laneq_f32(_r01, _k012x, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k012x, 2); _sum2 = vfmaq_laneq_f32(_sum2, _r10, _k345x, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k345x, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r12, _k345x, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k678x, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r21, _k678x, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k678x, 2); _sum1 = vaddq_f32(_sum1, _sum2); vst1q_f32(outptr, _sum1); r0 += 4; r1 += 4; r2 += 4; outptr += 4; } #else if (nn > 0) { asm volatile( "pld [%2, #192] \n" "vld1.f32 {d16-d18}, [%2] \n"// r0 "add %2, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "0: \n" "vmul.f32 q7, q8, %e10[0] \n" "vand q14, %q13, %q13 \n"// q14 = _bias0 "vmul.f32 q13, q10, %e10[1] \n" "vmla.f32 q14, q11, %f10[0] \n" "pld [%3, #192] \n" "vld1.f32 {d16-d18}, [%3] \n"// r1 "add %3, #16 \n" "vmla.f32 q7, q8, %e11[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q13, q10, %e11[1] \n" "vmla.f32 q14, q11, %f11[0] \n" "pld [%4, #192] \n" "vld1.f32 {d16-d18}, [%4] \n"// r2 "add %4, #16 \n" "vmla.f32 q7, q8, %e12[0] \n" "vext.32 q10, q8, q9, #1 \n" 
"vext.32 q11, q8, q9, #2 \n" "vmla.f32 q13, q10, %e12[1] \n" "vmla.f32 q14, q11, %f12[0] \n" "pld [%2, #192] \n" "vld1.f32 {d16-d18}, [%2] \n"// r0 "add %2, #16 \n" "vadd.f32 q7, q7, q13 \n" "vadd.f32 q7, q7, q14 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vst1.f32 {d14-d15}, [%1]! \n" "subs %0, #1 \n" "bne 0b \n" "sub %2, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k012x), // %10 "w"(_k345x), // %11 "w"(_k678x), // %12 "w"(_bias0) // %13 : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum = vmulq_f32(_r00, _k012x); _sum = vmlaq_f32(_sum, _r10, _k345x); _sum = vmlaq_f32(_sum, _r20, _k678x); _sum = vsetq_lane_f32(bias0, _sum, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); *outptr = vget_lane_f32(_ss, 0); #endif // __aarch64__ #else float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr = sum; #endif r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const int tailstep = w - 2*outw + w; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int g=0; g<group; g++) { Mat out = top_blob.channel(g); const float bias0 = bias ? 
bias[g] : 0.f; const float* kernel0 = kernel + g*9; float* outptr = out; const float* img0 = bottom_blob.channel(g); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; #if __ARM_NEON float32x4_t _k012x = vld1q_f32(kernel0); float32x4_t _k345x = vld1q_f32(kernel0+3); float32x4_t _k678x = vld1q_f32(kernel0+6); _k012x = vsetq_lane_f32(0.f, _k012x, 3); _k345x = vsetq_lane_f32(0.f, _k345x, 3); _k678x = vsetq_lane_f32(0.f, _k678x, 3); float32x4_t _bias0 = vdupq_n_f32(bias0); #else const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; #endif // __ARM_NEON int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4x2_t _r0 = vld2q_f32(r0); float32x4x2_t _r0n = vld2q_f32(r0+8); float32x4_t _r00 = _r0.val[0];// 0 2 4 6 float32x4_t _r01 = _r0.val[1];// 1 3 5 7 float32x4_t _r02 = vextq_f32(_r00, _r0n.val[0], 1);// 2 4 6 8 float32x4_t _outp = vfmaq_laneq_f32(_bias0, _r00, _k012x, 0); _outp = vfmaq_laneq_f32(_outp, _r01, _k012x, 1); _outp = vfmaq_laneq_f32(_outp, _r02, _k012x, 2); float32x4x2_t _r1 = vld2q_f32(r1); float32x4x2_t _r1n = vld2q_f32(r1+8); float32x4_t _r10 = _r1.val[0]; float32x4_t _r11 = _r1.val[1]; float32x4_t _r12 = vextq_f32(_r10, _r1n.val[0], 1); _outp = vfmaq_laneq_f32(_outp, _r10, _k345x, 0); _outp = vfmaq_laneq_f32(_outp, _r11, _k345x, 1); _outp = vfmaq_laneq_f32(_outp, _r12, _k345x, 2); float32x4x2_t _r2 = vld2q_f32(r2); float32x4x2_t _r2n = vld2q_f32(r2+8); float32x4_t _r20 = _r2.val[0]; float32x4_t _r21 = _r2.val[1]; float32x4_t _r22 = vextq_f32(_r20, _r2n.val[0], 1); _outp = vfmaq_laneq_f32(_outp, _r20, _k678x, 0); _outp = vfmaq_laneq_f32(_outp, _r21, _k678x, 1); _outp = vfmaq_laneq_f32(_outp, _r22, _k678x, 2); vst1q_f32(outptr, _outp); r0 += 8; r1 += 8; r2 += 8; outptr += 4; } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld2.f32 {d4-d7}, 
[%2]! \n" "vand q11, %q13, %q13 \n" "0: \n" "vmul.f32 q0, q2, %e10[0] \n" "vmul.f32 q10, q3, %e10[1] \n" "pld [%2, #128] \n" "vld2.f32 {d16-d17}, [%2] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f10[0] \n" "pld [%3, #256] \n" "vld2.f32 {d4-d7}, [%3]! \n" "vmla.f32 q0, q2, %e11[0] \n" "vmla.f32 q10, q3, %e11[1] \n" "pld [%3, #128] \n" "vld2.f32 {d16-d17}, [%3] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f11[0] \n" "pld [%4, #256] \n" "vld2.f32 {d4-d7}, [%4]! \n" "vmla.f32 q0, q2, %e12[0] \n" "vmla.f32 q10, q3, %e12[1] \n" "pld [%4, #128] \n" "vld2.f32 {d16-d17}, [%4] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f12[0] \n" "pld [%2, #256] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vadd.f32 q0, q0, q10 \n" "vadd.f32 q0, q0, q11 \n" "vand q11, %q13, %q13 \n" "subs %0, #1 \n" "vst1.f32 {d0-d1}, [%1]! \n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k012x), // %10 "w"(_k345x), // %11 "w"(_k678x), // %12 "w"(_bias0) // %13 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum = vmulq_f32(_r00, _k012x); _sum = vmlaq_f32(_sum, _r10, _k345x); _sum = vmlaq_f32(_sum, _r20, _k678x); _sum = vsetq_lane_f32(bias0, _sum, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); *outptr = vget_lane_f32(_ss, 0); #endif // __aarch64__ #else float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr = sum; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 
+= 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
scheme.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi
//

#if !defined(KRATOS_SCHEME )
#define KRATOS_SCHEME

/* System includes */

/* External includes */

/* Project includes */
#include "includes/model_part.h"
#include "utilities/openmp_utils.h"

namespace Kratos
{

///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name Enum's
///@{

///@}
///@name Functions
///@{

///@}
///@name Kratos Classes
///@{

/**
 * @class Scheme
 * @ingroup KratosCore
 * @brief This class provides the implementation of the basic tasks that are needed by the solution strategy.
 * @details It is intended to be the place for tailoring the solution strategies to problem specific tasks.
 * Derived schemes implement the actual time integration; this base class provides default (mostly empty or
 * simple loop-over-entities) implementations plus deprecated pointer-based overloads kept for a transition period.
 * @tparam TSparseSpace The sparse space considered
 * @tparam TDenseSpace The dense space considered
 * @author Riccardo Rossi
 */
template<class TSparseSpace,
         class TDenseSpace //= DenseSpace<double>
         >
class Scheme
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of Scheme
    KRATOS_CLASS_POINTER_DEFINITION(Scheme);

    /// Data type definition
    typedef typename TSparseSpace::DataType TDataType;

    /// Matrix type definition
    typedef typename TSparseSpace::MatrixType TSystemMatrixType;

    /// Vector type definition
    typedef typename TSparseSpace::VectorType TSystemVectorType;

    /// Local system matrix type definition
    typedef typename TDenseSpace::MatrixType LocalSystemMatrixType;

    /// Local system vector type definition
    typedef typename TDenseSpace::VectorType LocalSystemVectorType;

    /// DoF type definition
    typedef Dof<double> TDofType;

    /// DoF array type definition
    typedef ModelPart::DofsArrayType DofsArrayType;

    /// DoF iterator type definition
    typedef typename PointerVectorSet<TDofType, IndexedObject>::iterator DofIterator;

    /// DoF constant iterator type definition
    typedef typename PointerVectorSet<TDofType, IndexedObject>::const_iterator DofConstantIterator;

    /// Elements containers definition
    typedef ModelPart::ElementsContainerType ElementsArrayType;

    /// Conditions containers definition
    typedef ModelPart::ConditionsContainerType ConditionsArrayType;

    /**
     * @class LocalSystemComponents
     * @brief This struct is used in the component wise calculation only. It is defined here and is used to declare a member variable in the component wise schemes; private pointers can only be accessed by means of set and get functions
     * @details This allows to set and not copy the Element_Variables and Condition_Variables which will be asked and set by another strategy object
     */
    struct LocalSystemComponents
    {
    private:
        ///@name Member Variables
        ///@{

        // Elements (non-owning pointers set by an external strategy object)
        std::vector<LocalSystemMatrixType> *mpLHS_Element_Components;
        const std::vector< Variable< LocalSystemMatrixType > > *mpLHS_Element_Variables;
        std::vector<LocalSystemVectorType> *mpRHS_Element_Components;
        const std::vector< Variable< LocalSystemVectorType > > *mpRHS_Element_Variables;

        // Conditions (non-owning pointers set by an external strategy object)
        std::vector<LocalSystemMatrixType> *mpLHS_Condition_Components;
        const std::vector< Variable< LocalSystemMatrixType > > *mpLHS_Condition_Variables;
        std::vector<LocalSystemVectorType> *mpRHS_Condition_Components;
        const std::vector< Variable< LocalSystemVectorType > > *mpRHS_Condition_Variables;

        ///@}
    public:
        ///@name Operations
        ///@{

        /**
         * @brief This method initializes the pointer of the member variables
         */
        void Initialize()
        {
            mpLHS_Element_Components = NULL;
            mpLHS_Element_Variables = NULL;
            mpRHS_Element_Components = NULL;
            mpRHS_Element_Variables = NULL;
            mpLHS_Condition_Components = NULL;
            mpLHS_Condition_Variables = NULL;
            mpRHS_Condition_Components = NULL;
            mpRHS_Condition_Variables = NULL;
        }

        /* Setting pointer variables */

        // Elements
        void SetLHS_Element_Components ( std::vector<LocalSystemMatrixType>& rLHS_Element_Components ) { mpLHS_Element_Components = &rLHS_Element_Components; };
        void SetLHS_Element_Variables ( const std::vector< Variable< LocalSystemMatrixType > >& rLHS_Element_Variables ) { mpLHS_Element_Variables = &rLHS_Element_Variables; };
        void SetRHS_Element_Components ( std::vector<LocalSystemVectorType>& rRHS_Element_Components ) { mpRHS_Element_Components = &rRHS_Element_Components; };
        void SetRHS_Element_Variables ( const std::vector< Variable< LocalSystemVectorType > >& rRHS_Element_Variables ) { mpRHS_Element_Variables = &rRHS_Element_Variables; };

        // NOTE: "set" is detected through the *Variables pointer, not the *Components pointer
        bool Are_LHS_Element_Components_Set() { if( mpLHS_Element_Variables == NULL ) return false; else return true; };
        bool Are_RHS_Element_Components_Set() { if( mpRHS_Element_Variables == NULL ) return false; else return true; };

        // Conditions
        void SetLHS_Condition_Components ( std::vector<LocalSystemMatrixType>& rLHS_Condition_Components ) { mpLHS_Condition_Components = &rLHS_Condition_Components; };
        void SetLHS_Condition_Variables ( const std::vector< Variable< LocalSystemMatrixType > >& rLHS_Condition_Variables ) { mpLHS_Condition_Variables = &rLHS_Condition_Variables; };
        void SetRHS_Condition_Components ( std::vector<LocalSystemVectorType>& rRHS_Condition_Components ) { mpRHS_Condition_Components = &rRHS_Condition_Components; };
        void SetRHS_Condition_Variables ( const std::vector< Variable< LocalSystemVectorType > >& rRHS_Condition_Variables ) { mpRHS_Condition_Variables = &rRHS_Condition_Variables; };

        bool Are_LHS_Condition_Components_Set() { if( mpLHS_Condition_Variables == NULL ) return false; else return true; };
        bool Are_RHS_Condition_Components_Set() { if( mpRHS_Condition_Variables == NULL ) return false; else return true; };

        /* Getting pointer variables */
        // NOTE: getters dereference without a NULL check; call only after the corresponding Set* has been invoked

        // Elements
        std::vector<LocalSystemMatrixType>& GetLHS_Element_Components() { return *mpLHS_Element_Components; };
        const std::vector< Variable< LocalSystemMatrixType > >& GetLHS_Element_Variables() { return *mpLHS_Element_Variables; };
        std::vector<LocalSystemVectorType>& GetRHS_Element_Components() { return *mpRHS_Element_Components; };
        const std::vector< Variable< LocalSystemVectorType > >& GetRHS_Element_Variables() { return *mpRHS_Element_Variables; };

        // Conditions
        std::vector<LocalSystemMatrixType>& GetLHS_Condition_Components() { return *mpLHS_Condition_Components; };
        const std::vector< Variable< LocalSystemMatrixType > >& GetLHS_Condition_Variables() { return *mpLHS_Condition_Variables; };
        std::vector<LocalSystemVectorType>& GetRHS_Condition_Components() { return *mpRHS_Condition_Components; };
        const std::vector< Variable< LocalSystemVectorType > >& GetRHS_Condition_Variables() { return *mpRHS_Condition_Variables; };

        ///@}
    };

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Default Constructor
     * @details Initializes the flags
     */
    explicit Scheme()
    {
        mSchemeIsInitialized = false;
        mElementsAreInitialized = false;
        mConditionsAreInitialized = false;
    }

    /** Copy Constructor.
     */
    explicit Scheme(Scheme& rOther)
        :mSchemeIsInitialized(rOther.mSchemeIsInitialized)
        ,mElementsAreInitialized(rOther.mElementsAreInitialized)
        ,mConditionsAreInitialized(rOther.mConditionsAreInitialized)
    {
    }

    /** Destructor.
     */
    virtual ~Scheme()
    {
    }

    ///@}
    ///@name Operators
    ///@{

    /**
     * @brief Clone method
     * @return The pointer of the cloned scheme
     */
    virtual Pointer Clone()
    {
        return Kratos::make_shared<Scheme>(*this) ;
    }

    /**
     * @brief Component wise components Get method
     * @warning Must be defined on the derived classes
     * @return The local system of components
     */
    virtual LocalSystemComponents& GetLocalSystemComponents()
    {
        // Always throws in the base class: only component-wise schemes hold this member
        KRATOS_ERROR << "Asking for Local Components to the SCHEME base class which is not component wise and not contains this member variable" << std::endl;
    }

    /**
     * @brief This is the place to initialize the Scheme.
     * @details This is intended to be called just once when the strategy is initialized
     * @param rModelPart The model part of the problem to solve
     */
    virtual void Initialize(ModelPart& rModelPart)
    {
        KRATOS_TRY
        mSchemeIsInitialized = true;
        KRATOS_CATCH("")
    }

    /**
     * @brief This method returns if the scheme is initialized
     * @return True if initialized, false otherwise
     */
    bool SchemeIsInitialized()
    {
        return mSchemeIsInitialized;
    }

    /**
     * @brief This method sets if the scheme has been initialized or not (true by default)
     * @param SchemeIsInitializedFlag If the flag must be set to true or false
     */
    void SetSchemeIsInitialized(bool SchemeIsInitializedFlag = true)
    {
        mSchemeIsInitialized = SchemeIsInitializedFlag;
    }

    /**
     * @brief This method returns if the elements are initialized
     * @return True if initialized, false otherwise
     */
    bool ElementsAreInitialized()
    {
        return mElementsAreInitialized;
    }

    /**
     * @brief This method sets if the elements have been initialized or not (true by default)
     * @param ElementsAreInitializedFlag If the flag must be set to true or false
     */
    void SetElementsAreInitialized(bool ElementsAreInitializedFlag = true)
    {
        mElementsAreInitialized = ElementsAreInitializedFlag;
    }

    /**
     * @brief This method returns if the conditions are initialized
     * @return True if initialized, false otherwise
     */
    bool ConditionsAreInitialized()
    {
        return mConditionsAreInitialized;
    }

    /**
     * @brief This method sets if the conditions have been initialized or not (true by default)
     * @param ConditionsAreInitializedFlag If the flag must be set to true or false
     */
    void SetConditionsAreInitialized(bool ConditionsAreInitializedFlag = true)
    {
        mConditionsAreInitialized = ConditionsAreInitializedFlag;
    }

    /**
     * @brief This is the place to initialize the elements.
     * @details This is intended to be called just once when the strategy is initialized
     * @param rModelPart The model part of the problem to solve
     */
    virtual void InitializeElements( ModelPart& rModelPart)
    {
        KRATOS_TRY

        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.Elements().size()); i++) {
            auto it_elem = rModelPart.ElementsBegin() + i;
            it_elem->Initialize(r_current_process_info);
        }

        SetElementsAreInitialized();

        KRATOS_CATCH("")
    }

    /**
     * @brief This is the place to initialize the conditions.
     * @details This is intended to be called just once when the strategy is initialized
     * @param rModelPart The model part of the problem to solve
     */
    virtual void InitializeConditions(ModelPart& rModelPart)
    {
        KRATOS_TRY

        // Conditions may depend on element data, hence the enforced ordering
        KRATOS_ERROR_IF_NOT(mElementsAreInitialized) << "Before initilizing Conditions, initialize Elements FIRST" << std::endl;

        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.Conditions().size()); i++) {
            auto it_cond = rModelPart.ConditionsBegin() + i;
            it_cond->Initialize(r_current_process_info);
        }

        SetConditionsAreInitialized();

        KRATOS_CATCH("")
    }

    /**
     * @brief Function called once at the beginning of each solution step.
     * @details The basic operations to be carried in there are the following:
     * - managing variables to be kept constant over the time step (for example time-Scheme constants depending on the actual time step)
     * @param rModelPart The model part of the problem to solve
     * @param A LHS matrix
     * @param Dx Incremental update of primary variables
     * @param b RHS Vector
     */
    virtual void InitializeSolutionStep(
        ModelPart& rModelPart,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b
        )
    {
        KRATOS_TRY

        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // Definition of the first element iterator
        const auto it_elem_begin = rModelPart.ElementsBegin();

        // Initializes solution step for all of the elements
        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.Elements().size()); ++i) {
            auto it_elem = it_elem_begin + i;
            it_elem->InitializeSolutionStep(r_current_process_info);
        }

        // Definition of the first condition iterator
        const auto it_cond_begin = rModelPart.ConditionsBegin();

        // Initializes solution step for all of the conditions
        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.Conditions().size()); ++i) {
            auto it_cond = it_cond_begin + i;
            it_cond->InitializeSolutionStep(r_current_process_info);
        }

        // Definition of the first constraint iterator
        const auto it_const_begin = rModelPart.MasterSlaveConstraintsBegin();

        // Initializes solution step for all of the constraints
        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i) {
            auto it_const = it_const_begin + i;
            it_const->InitializeSolutionStep(r_current_process_info);
        }

        KRATOS_CATCH("")
    }

    /**
     * @brief Function called once at the end of a solution step, after convergence is reached if an iterative process is needed
     * @param rModelPart The model part of the problem to solve
     * @param A LHS matrix
     * @param Dx Incremental update of primary variables
     * @param b RHS Vector
     */
    virtual void FinalizeSolutionStep(
        ModelPart& rModelPart,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b)
    {
        KRATOS_TRY

        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // Definition of the first element iterator
        const auto it_elem_begin = rModelPart.ElementsBegin();

        // Finalizes solution step for all of the elements
        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.Elements().size()); ++i) {
            auto it_elem = it_elem_begin + i;
            it_elem->FinalizeSolutionStep(r_current_process_info);
        }

        // Definition of the first condition iterator
        const auto it_cond_begin = rModelPart.ConditionsBegin();

        // Finalizes solution step for all of the conditions
        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.Conditions().size()); ++i) {
            auto it_cond = it_cond_begin + i;
            it_cond->FinalizeSolutionStep(r_current_process_info);
        }

        // Definition of the first constraint iterator
        const auto it_const_begin = rModelPart.MasterSlaveConstraintsBegin();

        // Finalizes solution step for all of the constraints
        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i) {
            auto it_const = it_const_begin + i;
            it_const->FinalizeSolutionStep(r_current_process_info);
        }

        KRATOS_CATCH("")
    }

    /************************ BEGIN FRACTIONAL STEP METHODS ****************************/
    /********************* TODO: DECIDE IF NECESSARY TO DEFINE *************************/
    /***********************************************************************************/

//     /**
//      * @brief Initializes solution step, to be used when system is not explicitely defined
//      * @details For example for fractional step strategies
//      * @warning Must be defined in derived classes
//      * @param rModelPart The model part of the problem to solve
//      */
//     virtual void InitializeSolutionStep(ModelPart& rModelPart)
//     {
//         KRATOS_TRY
//         KRATOS_CATCH("")
//     }
//
//     /**
//      * @brief Finalizes solution step, to be used when system is not explicitely defined
//      * @details For example for fractional step strategies
//      * @warning Must be defined in derived classes
//      * @param rModelPart The model part of the problem to solve
//      */
//     virtual void FinalizeSolutionStep(ModelPart& rModelPart)
//     {
//         KRATOS_TRY
//         KRATOS_CATCH("")
//     }
//
//     /**
//      * @brief Executed before each fractional step
//      * @warning Must be defined in derived classes
//      * @param rModelPart The model part of the problem to solve
//      */
//     virtual void InitializeFractionalSolutionStep(ModelPart& rModelPart)
//     {
//         KRATOS_TRY
//         KRATOS_CATCH("")
//     }
//
//     /**
//      * @brief Executed after each fractional step
//      * @warning Must be defined in derived classes
//      * @param rModelPart The model part of the problem to solve
//      */
//     virtual void FinalizeFractionalSolutionStep(ModelPart& rModelPart)
//     {
//         KRATOS_TRY
//         KRATOS_CATCH("")
//     }

    /************************ END FRACTIONAL STEP METHODS ****************************/
    /***********************************************************************************/

    /**
     * @brief Function to be called when it is needed to initialize an iteration. It is designed to be called at the beginning of each non linear iteration
     * @note Take care: the elemental function with the same name is NOT called here.
     * @warning Must be defined in derived classes
     * @details The function is called in the builder for memory efficiency
     * @param rModelPart The model part of the problem to solve
     * @param A LHS matrix
     * @param Dx Incremental update of primary variables
     * @param b RHS Vector
     */
    virtual void InitializeNonLinIteration(
        ModelPart& rModelPart,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b
        )
    {
        KRATOS_TRY
        KRATOS_CATCH("")
    }

    /**
     * @brief It initializes a non-linear iteration (for an individual element)
     * @warning Must be defined in derived classes
     * @param rCurrentElement The element to compute
     * @param rCurrentProcessInfo The current process info instance
     */
    virtual void InitializeNonLinearIteration(
        Element::Pointer rCurrentElement,
        ProcessInfo& rCurrentProcessInfo
        )
    {
        KRATOS_TRY
        KRATOS_CATCH("")
    }

    /**
     * @brief It initializes a non-linear iteration (for an individual condition)
     * @warning Must be defined in derived classes
     * @param rCurrentCondition The condition to compute
     * @param rCurrentProcessInfo The current process info instance
     */
    virtual void InitializeNonLinearIteration(
        Condition::Pointer rCurrentCondition,
        ProcessInfo& rCurrentProcessInfo
        )
    {
        KRATOS_TRY
        KRATOS_CATCH("")
    }

    /**
     * @brief Function to be called when it is needed to finalize an iteration. It is designed to be called at the end of each non linear iteration
     * @param rModelPart The model part of the problem to solve
     * @param A LHS matrix
     * @param Dx Incremental update of primary variables
     * @param b RHS Vector
     */
    virtual void FinalizeNonLinIteration(
        ModelPart& rModelPart,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b
        )
    {
        KRATOS_TRY

        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // Definition of the first element iterator
        const auto it_elem_begin = rModelPart.ElementsBegin();

        // Finalizes non-linear iteration for all of the elements
        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.Elements().size()); ++i) {
            auto it_elem = it_elem_begin + i;
            it_elem->FinalizeNonLinearIteration(r_current_process_info);
        }

        // Definition of the first condition iterator
        const auto it_cond_begin = rModelPart.ConditionsBegin();

        // Finalizes non-linear iteration for all of the conditions
        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.Conditions().size()); ++i) {
            auto it_cond = it_cond_begin + i;
            it_cond->FinalizeNonLinearIteration(r_current_process_info);
        }

        // Definition of the first constraint iterator
        const auto it_const_begin = rModelPart.MasterSlaveConstraintsBegin();

        // Finalizes non-linear iteration for all of the constraints
        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i) {
            auto it_const = it_const_begin + i;
            it_const->FinalizeNonLinearIteration(r_current_process_info);
        }

        KRATOS_CATCH("")
    }

    /**
     * @brief Performing the prediction of the solution.
     * @warning Must be defined in derived classes
     * @param rModelPart The model part of the problem to solve
     * @param rDofSet Set of all primary variables
     * @param A LHS matrix
     * @param Dx Incremental update of primary variables
     * @param b RHS Vector
     */
    virtual void Predict(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b
        )
    {
        KRATOS_TRY
        KRATOS_CATCH("")
    }

    /**
     * @brief Performing the update of the solution.
     * @warning Must be defined in derived classes
     * @param rModelPart The model part of the problem to solve
     * @param rDofSet Set of all primary variables
     * @param A LHS matrix
     * @param Dx Incremental update of primary variables
     * @param b RHS Vector
     */
    virtual void Update(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b
        )
    {
        KRATOS_TRY
        KRATOS_CATCH("")
    }

    /**
     * @brief Functions to be called to prepare the data needed for the output of results.
     * @warning Must be defined in derived classes
     * @param rModelPart The model part of the problem to solve
     * @param rDofSet Set of all primary variables
     * @param A LHS matrix
     * @param Dx Incremental update of primary variables
     * @param b RHS Vector
     */
    virtual void CalculateOutputData(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b
        )
    {
        KRATOS_TRY
        KRATOS_CATCH("")
    }

    /**
     * @brief Functions that cleans the results data.
     * @warning Must be implemented in the derived classes
     */
    virtual void CleanOutputData()
    {
        KRATOS_TRY
        KRATOS_CATCH("")
    }

    /**
     * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed after the end of the solution step
     * @warning Must be implemented in the derived classes
     */
    virtual void Clean()
    {
        KRATOS_TRY
        KRATOS_CATCH("")
    }

    /**
     * @brief Function to clean up "element" scratch space after each element is built.
     * @param rElement The element to compute
     */
    virtual void CleanMemory(Element& rElement)
    {
        this->CleanMemory(Element::Pointer(&rElement)); // TODO remove this after the transition period and uncomment the following
        // rElement.CleanMemory();
    }

    // KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
    virtual void CleanMemory(Element::Pointer rCurrentElement)
    {
        rCurrentElement->CleanMemory();
    }

    /**
     * @brief Function to clean up "condition" scratch space after each condition is built.
     * @param rCondition The condition to compute
     */
    virtual void CleanMemory(Condition& rCondition)
    {
        this->CleanMemory(Condition::Pointer(&rCondition)); // TODO remove this after the transition period and uncomment the following
        // rCondition.CleanMemory();
    }

    // KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
    virtual void CleanMemory(Condition::Pointer rCurrentCondition)
    {
        rCurrentCondition->CleanMemory();
    }

    /**
     * @brief Liberate internal storage.
     * @warning Must be implemented in the derived classes
     */
    virtual void Clear()
    {
        KRATOS_TRY
        KRATOS_CATCH("")
    }

    /**
     * @brief This function is designed to be called once to perform all the checks needed
     * on the input provided. Checks can be "expensive" as the function is designed
     * to catch user's errors.
     * @details Checks can be "expensive" as the function is designed
     * @param rModelPart The model part of the problem to solve
     * @return 0 all OK, 1 otherwise
     */
    virtual int Check(const ModelPart& rModelPart) const
    {
        KRATOS_TRY

        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // Checks for all of the elements
        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.NumberOfElements()); i++) {
            auto it_elem = rModelPart.ElementsBegin() + i;
            it_elem->Check(r_current_process_info);
        }

        // Checks for all of the conditions
        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.NumberOfConditions()); i++) {
            auto it_cond = rModelPart.ConditionsBegin() + i;
            it_cond->Check(r_current_process_info);
        }

        // Checks for all of the constraints
        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.NumberOfMasterSlaveConstraints()); i++) {
            auto it_constraint = rModelPart.MasterSlaveConstraintsBegin() + i;
            it_constraint->Check(r_current_process_info);
        }

        return 0;
        KRATOS_CATCH("");
    }

    virtual int Check(ModelPart& rModelPart)
    {
        // calling the const version for backward compatibility
        const Scheme& r_const_this = *this;
        const ModelPart& r_const_model_part = rModelPart;
        return r_const_this.Check(r_const_model_part);
    }

    /**
     * @brief This function is designed to be called in the builder and solver to introduce the selected time integration scheme.
     * @details It "asks" the matrix needed to the element and performs the operations needed to introduce the selected time integration scheme. This function calculates at the same time the contribution to the LHS and to the RHS of the system
     * @param rElement The element to compute
     * @param LHS_Contribution The LHS matrix contribution
     * @param RHS_Contribution The RHS vector contribution
     * @param rEquationIdVector The ID's of the element degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    virtual void CalculateSystemContributions(
        Element& rElement,
        LocalSystemMatrixType& LHS_Contribution,
        LocalSystemVectorType& RHS_Contribution,
        Element::EquationIdVectorType& rEquationIdVector,
        const ProcessInfo& rCurrentProcessInfo
        )
    {
        // const_cast bridges to the deprecated non-const overload during the transition period
        this->CalculateSystemContributions(
            Element::Pointer(&rElement),
            LHS_Contribution,
            RHS_Contribution,
            rEquationIdVector,
            const_cast<ProcessInfo&>(rCurrentProcessInfo)
            );
        // TODO remove this after the transition period and uncomment the following
        // rElement.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, rCurrentProcessInfo);
    }

    // KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
    virtual void CalculateSystemContributions(
        Element::Pointer pCurrentElement,
        LocalSystemMatrixType& LHS_Contribution,
        LocalSystemVectorType& RHS_Contribution,
        Element::EquationIdVectorType& EquationId,
        ProcessInfo& rCurrentProcessInfo
        )
    {
        pCurrentElement->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, rCurrentProcessInfo);
    }

    /**
     * @brief Functions totally analogous to the precedent but applied to the "condition" objects
     * @param rCondition The condition to compute
     * @param LHS_Contribution The LHS matrix contribution
     * @param RHS_Contribution The RHS vector contribution
     * @param rEquationIdVector The ID's of the condition degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    virtual void CalculateSystemContributions(
        Condition& rCondition,
        LocalSystemMatrixType& LHS_Contribution,
        LocalSystemVectorType& RHS_Contribution,
        Element::EquationIdVectorType& rEquationIdVector,
        const ProcessInfo& rCurrentProcessInfo
        )
    {
        this->Condition_CalculateSystemContributions(
            Condition::Pointer(&rCondition),
            LHS_Contribution,
            RHS_Contribution,
            rEquationIdVector,
            const_cast<ProcessInfo&>(rCurrentProcessInfo)
            );
        // TODO remove this after the transition period and uncomment the following
        // rCondition.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, rCurrentProcessInfo);
    }

    // KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
    virtual void Condition_CalculateSystemContributions(
        Condition::Pointer pCurrentCondition,
        LocalSystemMatrixType& LHS_Contribution,
        LocalSystemVectorType& RHS_Contribution,
        Element::EquationIdVectorType& EquationId,
        ProcessInfo& rCurrentProcessInfo
        )
    {
        pCurrentCondition->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, rCurrentProcessInfo);
    }

    /**
     * @brief This function is designed to calculate just the RHS contribution
     * @param rElement The element to compute
     * @param RHS_Contribution The RHS vector contribution
     * @param rEquationIdVector The ID's of the element degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    virtual void CalculateRHSContribution(
        Element& rElement,
        LocalSystemVectorType& RHS_Contribution,
        Element::EquationIdVectorType& rEquationIdVector,
        const ProcessInfo& rCurrentProcessInfo
        )
    {
        this->Calculate_RHS_Contribution(
            Element::Pointer(&rElement),
            RHS_Contribution,
            rEquationIdVector,
            const_cast<ProcessInfo&>(rCurrentProcessInfo)
            );
        // TODO remove this after the transition period and uncomment the following
        // rElement.CalculateRightHandSide(RHS_Contribution, rCurrentProcessInfo);
    }

    // KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
    virtual void Calculate_RHS_Contribution(
        Element::Pointer pCurrentElement,
        LocalSystemVectorType& RHS_Contribution,
        Element::EquationIdVectorType& EquationId,
        ProcessInfo& rCurrentProcessInfo
        )
    {
        pCurrentElement->CalculateRightHandSide(RHS_Contribution, rCurrentProcessInfo);
    }

    /**
     * @brief Functions totally analogous to the precedent but applied to the "condition" objects
     * @param rCondition The condition to compute
     * @param RHS_Contribution The RHS vector contribution
     * @param rEquationIdVector The ID's of the condition degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    virtual void CalculateRHSContribution(
        Condition& rCondition,
        LocalSystemVectorType& RHS_Contribution,
        Element::EquationIdVectorType& rEquationIdVector,
        const ProcessInfo& rCurrentProcessInfo
        )
    {
        this->Condition_Calculate_RHS_Contribution(
            Condition::Pointer(&rCondition),
            RHS_Contribution,
            rEquationIdVector,
            const_cast<ProcessInfo&>(rCurrentProcessInfo)
            );
        // TODO remove this after the transition period and uncomment the following
        // rCondition.CalculateRightHandSide(RHS_Contribution, rCurrentProcessInfo);
    }

    // KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
    virtual void Condition_Calculate_RHS_Contribution(
        Condition::Pointer pCurrentCondition,
        LocalSystemVectorType& RHS_Contribution,
        Element::EquationIdVectorType& EquationId,
        ProcessInfo& rCurrentProcessInfo
        )
    {
        pCurrentCondition->CalculateRightHandSide(RHS_Contribution, rCurrentProcessInfo);
    }

    /**
     * @brief This function is designed to calculate just the LHS contribution
     * @param rElement The element to compute
     * @param LHS_Contribution The LHS matrix contribution
     * @param rEquationIdVector The ID's of the element degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    virtual void CalculateLHSContribution(
        Element& rElement,
        LocalSystemMatrixType& LHS_Contribution,
        Element::EquationIdVectorType& rEquationIdVector,
        const ProcessInfo& rCurrentProcessInfo
        )
    {
        this->Calculate_LHS_Contribution(
            Element::Pointer(&rElement),
            LHS_Contribution,
            rEquationIdVector,
            const_cast<ProcessInfo&>(rCurrentProcessInfo)
            );
        // TODO remove this after the transition period and uncomment the following
        // rElement.CalculateLeftHandSide(LHS_Contribution, rCurrentProcessInfo);
    }

    // KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
    virtual void Calculate_LHS_Contribution(
        Element::Pointer pCurrentElement,
        LocalSystemMatrixType& LHS_Contribution,
        Element::EquationIdVectorType& EquationId,
        ProcessInfo& rCurrentProcessInfo
        )
    {
        pCurrentElement->CalculateLeftHandSide(LHS_Contribution, rCurrentProcessInfo);
    }

    /**
     * @brief Functions totally analogous to the precedent but applied to the "condition" objects
     * @param rCondition The condition to compute
     * @param LHS_Contribution The LHS matrix contribution
     * @param rEquationIdVector The ID's of the condition degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    virtual void CalculateLHSContribution(
        Condition& rCondition,
        LocalSystemMatrixType& LHS_Contribution,
        Element::EquationIdVectorType& rEquationIdVector,
        const ProcessInfo& rCurrentProcessInfo
        )
    {
        this->Condition_Calculate_LHS_Contribution(
            Condition::Pointer(&rCondition),
            LHS_Contribution,
            rEquationIdVector,
            const_cast<ProcessInfo&>(rCurrentProcessInfo)
            );
        // TODO remove this after the transition period and uncomment the following
        // rCondition.CalculateLeftHandSide(LHS_Contribution, rCurrentProcessInfo);
    }

    // KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
    virtual void Condition_Calculate_LHS_Contribution(
        Condition::Pointer pCurrentCondition,
        LocalSystemMatrixType& LHS_Contribution,
        Element::EquationIdVectorType& EquationId,
        ProcessInfo& rCurrentProcessInfo
        )
    {
        pCurrentCondition->CalculateLeftHandSide(LHS_Contribution, rCurrentProcessInfo);
    }

    /**
     * @brief This method gets the equation id corresponding to the current element
     * @param rElement The element to compute
     * @param rEquationId The ID's of the element degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    virtual void EquationId(
        const Element& rElement,
        Element::EquationIdVectorType& rEquationId,
        const ProcessInfo& rCurrentProcessInfo
        )
    {
        rElement.EquationIdVector(rEquationId, rCurrentProcessInfo);
    }

    // KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
    virtual void EquationId(
        Element::Pointer pCurrentElement,
        Element::EquationIdVectorType& EquationId,
        ProcessInfo& rCurrentProcessInfo
        )
    {
        (pCurrentElement)->EquationIdVector(EquationId, rCurrentProcessInfo);
    }

    /**
     * @brief Functions totally analogous to the precedent but applied to the "condition" objects
     * @param rCondition The condition to compute
     * @param rEquationId The ID's of the condition degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    virtual void EquationId(
        const Condition& rCondition,
        Element::EquationIdVectorType& rEquationId,
        const ProcessInfo& rCurrentProcessInfo
        )
    {
        rCondition.EquationIdVector(rEquationId, rCurrentProcessInfo);
    }

    // KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
    virtual void Condition_EquationId(
        Condition::Pointer pCurrentCondition,
        Element::EquationIdVectorType& EquationId,
        ProcessInfo& rCurrentProcessInfo
        )
    {
        (pCurrentCondition)->EquationIdVector(EquationId, rCurrentProcessInfo);
    }

    /**
     * @brief Function that returns the list of Degrees of freedom to be assembled in the system for a Given element
     * @param rElement The element to compute
     * @param rDofList The list containing the element degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    virtual void GetDofList(
        const Element& rElement,
        Element::DofsVectorType& rDofList,
        const ProcessInfo& rCurrentProcessInfo
        )
    {
        rElement.GetDofList(rDofList, rCurrentProcessInfo);
    }

    // KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
    virtual void GetElementalDofList(
        Element::Pointer pCurrentElement,
        Element::DofsVectorType& ElementalDofList,
        ProcessInfo& rCurrentProcessInfo
        )
    {
        pCurrentElement->GetDofList(ElementalDofList, rCurrentProcessInfo);
    }

    /**
     * @brief Function that returns the list of Degrees of freedom to be assembled in the system for a Given condition
     * @param rCondition The condition to compute
     * @param rDofList The list containing the condition degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    virtual void GetDofList(
        const Condition& rCondition,
        Element::DofsVectorType& rDofList,
        const ProcessInfo& rCurrentProcessInfo
        )
    {
        rCondition.GetDofList(rDofList, rCurrentProcessInfo);
    }

    // KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
    virtual void GetConditionDofList(
        Condition::Pointer pCurrentCondition,
        Element::DofsVectorType& ConditionDofList,
        ProcessInfo& rCurrentProcessInfo
        )
    {
        pCurrentCondition->GetDofList(ConditionDofList, rCurrentProcessInfo);
    }

    ///@}
    ///@name Operations
    ///@{

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    virtual std::string Info() const
    {
        return "Scheme";
    }

    /// Print information about this object.
    virtual void PrintInfo(std::ostream& rOStream) const
    {
        rOStream << Info();
    }

    /// Print object's data.
    virtual void PrintData(std::ostream& rOStream) const
    {
        rOStream << Info();
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}
protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    bool mSchemeIsInitialized;      /// Flag to be used in controlling if the Scheme has been initialized or not
    bool mElementsAreInitialized;   /// Flag taking in account if the elements were initialized correctly or not
    bool mConditionsAreInitialized; /// Flag taking in account if the conditions were initialized correctly or not

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}
private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}
}; // Class Scheme
}  // namespace Kratos.

#endif /* KRATOS_SCHEME defined */
diagonalize_matrix_typed.c
#include "bml.h"
#include "../typed.h"
#include "../macros.h"
#include "../C-interface/dense/bml_getters_dense.h"
#include "../C-interface/bml_logger.h"

#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>

/* Relative tolerance for the decomposition checks: looser for single
 * precision builds, tighter for double precision builds. */
#if defined(SINGLE_REAL) || defined(SINGLE_COMPLEX)
#define REL_TOL 1.2e-5
#else
#define REL_TOL 1e-11
#endif

/** Test bml_diagonalize on a random symmetrized matrix.
 *
 *  Builds A = (R + R^T)/2 from a random N x N matrix R, diagonalizes it,
 *  and verifies that
 *    1) each eigenvector is normalized (diagonal of C^t*C close to 1),
 *    2) ||C^t*C - Id||_F   <= N * REL_TOL  (orthonormal eigenvectors),
 *    3) ||C*D*C^t - A||_F  <= N * REL_TOL  (decomposition reproduces A).
 *
 *  \param N                 Matrix size
 *  \param matrix_type       bml matrix format
 *  \param matrix_precision  Element precision (single/double, real/complex)
 *  \param M                 Max non-zeros per row
 *  \return 0 on success, -1 on failure
 */
int TYPED_FUNC(
    test_diagonalize) (
    const int N,
    const bml_matrix_type_t matrix_type,
    const bml_matrix_precision_t matrix_precision,
    const int M)
{
    bml_matrix_t *A = NULL;
    bml_matrix_t *A_t = NULL;
    REAL_T *eigenvalues = NULL;
    bml_matrix_t *eigenvectors = NULL;
    bml_matrix_t *ct = NULL;
    bml_matrix_t *aux = NULL;
    bml_matrix_t *aux1 = NULL;
    bml_matrix_t *aux2 = NULL;
    bml_matrix_t *id = NULL;

    float fnorm;

    int max_row = MIN(N, PRINT_THRESHOLD);
    int max_col = MIN(N, PRINT_THRESHOLD);

    LOG_INFO("rel. tolerance = %e\n", REL_TOL);

    bml_distribution_mode_t distrib_mode = sequential;
#ifdef DO_MPI
    if (bml_getNRanks() > 1)
    {
        LOG_INFO("Use distributed matrix\n");
        distrib_mode = distributed;
    }
#endif

    A = bml_random_matrix(matrix_type, matrix_precision, N, M, distrib_mode);
    //LOG_INFO("A = \n");
    //bml_print_bml_matrix(A, 0, max_row, 0, max_col);

    A_t = bml_transpose_new(A);
    //LOG_INFO("A_t = \n");
    //bml_print_bml_matrix(A_t, 0, max_row, 0, max_col);

    /* Symmetrize: A <- 0.5*A + 0.5*A_t, so eigenvalues are real. */
    bml_add(A, A_t, 0.5, 0.5, 0.0);
    LOG_INFO("(A + A_t)/2 = \n");
    bml_print_bml_matrix(A, 0, max_row, 0, max_col);

    /* Allocate (and on Intel builds explicitly zero) the eigenvalue array
     * with an element size matching the runtime precision. */
    switch (matrix_precision)
    {
        case single_real:
            eigenvalues = bml_allocate_memory(N * sizeof(float));
#ifdef INTEL_OPT
#pragma omp parallel for simd
#pragma vector aligned
            for (int i = 0; i < N; i++)
            {
                __assume_aligned(eigenvalues, 64);
                eigenvalues[i] = 0.0;
            }
#endif
            break;
        case double_real:
            eigenvalues = bml_allocate_memory(N * sizeof(double));
#ifdef INTEL_OPT
#pragma omp parallel for simd
#pragma vector aligned
            for (int i = 0; i < N; i++)
            {
                __assume_aligned(eigenvalues, 64);
                eigenvalues[i] = 0.0;
            }
#endif
            break;
        case single_complex:
            eigenvalues = bml_allocate_memory(N * sizeof(float complex));
#ifdef INTEL_OPT
#pragma omp parallel for simd
#pragma vector aligned
            for (int i = 0; i < N; i++)
            {
                __assume_aligned(eigenvalues, 64);
                eigenvalues[i] = 0.0;
            }
#endif
            break;
        case double_complex:
            eigenvalues = bml_allocate_memory(N * sizeof(double complex));
#ifdef INTEL_OPT
#pragma omp parallel for simd
#pragma vector aligned
            for (int i = 0; i < N; i++)
            {
                __assume_aligned(eigenvalues, 64);
                eigenvalues[i] = 0.0;
            }
#endif
            break;
        default:
            LOG_DEBUG("matrix_precision is not set");
            break;
    }

    eigenvectors =
        bml_zero_matrix(matrix_type, matrix_precision, N, M, distrib_mode);
    aux = bml_zero_matrix(matrix_type, matrix_precision, N, M, distrib_mode);
    aux1 = bml_zero_matrix(matrix_type, matrix_precision, N, M, distrib_mode);
    aux2 = bml_zero_matrix(matrix_type, matrix_precision, N, M, distrib_mode);

    bml_diagonalize(A, eigenvalues, eigenvectors);

    if (bml_getMyRank() == 0)
    {
        LOG_INFO("%s\n", "eigenvectors");
    }
    bml_print_bml_matrix(eigenvectors, 0, max_row, 0, max_col);

    if (bml_getMyRank() == 0)
    {
        LOG_INFO("%s\n", "eigenvalues");
        for (int i = 0; i < max_row; i++)
            LOG_INFO("val = %e i%e\n", REAL_PART(eigenvalues[i]),
                     IMAGINARY_PART(eigenvalues[i]));
    }

    ct = bml_transpose_new(eigenvectors);
    if (bml_getMyRank() == 0)
    {
        LOG_INFO("%s\n", "transpose eigenvectors");
    }
    bml_print_bml_matrix(ct, 0, max_row, 0, max_col);

    bml_multiply(ct, eigenvectors, aux2, 1.0, 0.0, 0.0);        // C^t*C
    if (bml_getMyRank() == 0)
        LOG_INFO("C^t*C matrix:\n");
    bml_print_bml_matrix(aux2, 0, max_row, 0, max_col);

    /* Check each eigenvector is normalized: diagonal of C^t*C must be 1.
     * NOTE(review): aux2_dense is only freed on rank 0 — presumably
     * bml_export_to_dense returns data on the root rank only; confirm. */
    REAL_T *aux2_dense = bml_export_to_dense(aux2, dense_row_major);
    if (bml_getMyRank() == 0)
    {
        LOG_INFO("%s\n", "check eigenvectors norms");
        for (int i = 0; i < N; i++)
        {
            REAL_T val = aux2_dense[i + N * i];
            if (ABS(val - (REAL_T) 1.0) > REL_TOL)
            {
                LOG_INFO("i = %d, val = %e i%e\n", i, REAL_PART(val),
                         IMAGINARY_PART(val));
                LOG_ERROR
                    ("Error in matrix diagonalization; eigenvector not normalized\n");
            }
        }
        bml_free_memory(aux2_dense);
    }

    id = bml_identity_matrix(matrix_type, matrix_precision, N, M,
                             distrib_mode);
    if (bml_getMyRank() == 0)
        LOG_INFO("Identity matrix:\n");
    bml_print_bml_matrix(id, 0, max_row, 0, max_col);

    /* Orthonormality check: C^t*C - Id should vanish. */
    bml_add(aux2, id, 1.0, -1.0, 0.0);
    if (bml_getMyRank() == 0)
        LOG_INFO("C^t*C-Id matrix:\n");
    bml_print_bml_matrix(aux2, 0, max_row, 0, max_col);

    fnorm = bml_fnorm(aux2);
    /* (fnorm != fnorm) is a NaN guard, added here for consistency with the
     * CDC^t-A check below: a NaN norm must fail, not silently pass.
     * NOTE(review): matrices are not deallocated on this error path;
     * presumably LOG_ERROR aborts the test — confirm. */
    if (fabsf(fnorm) > N * REL_TOL || (fnorm != fnorm))
    {
        LOG_ERROR
            ("Error in matrix diagonalization; fnorm(C^t*C-Id) = %e\n",
             fnorm);
        return -1;
    }

    /* Rebuild A from the decomposition: aux = C*(D*C^t). */
    bml_set_diagonal(aux1, eigenvalues, 0.0);
    if (bml_getMyRank() == 0)
        LOG_INFO("Matrix after setting diagonal:\n");
    bml_print_bml_matrix(aux1, 0, max_row, 0, max_col);

    bml_multiply(aux1, ct, aux2, 1.0, 0.0, 0.0);        // D*C^t
    bml_multiply(eigenvectors, aux2, aux, 1.0, 0.0, 0.0);       // C*(D*C^t)
    if (bml_getMyRank() == 0)
        LOG_INFO("C*(D*C^t) matrix:\n");
    bml_print_bml_matrix(aux, 0, max_row, 0, max_col);

    bml_add(aux, A, 1.0, -1.0, 0.0);
    if (bml_getMyRank() == 0)
        LOG_INFO("C*(D*C^t)-A matrix:\n");
    bml_print_bml_matrix(aux, 0, max_row, 0, max_col);

    fnorm = bml_fnorm(aux);
    if (fabsf(fnorm) > N * REL_TOL || (fnorm != fnorm))
    {
        LOG_ERROR
            ("Error in matrix diagonalization; fnorm(CDC^t-A) = %e\n", fnorm);
        return -1;
    }

    bml_deallocate(&A);
    bml_deallocate(&aux);
    bml_deallocate(&aux1);
    bml_deallocate(&aux2);
    bml_deallocate(&ct);
    bml_deallocate(&A_t);
    bml_deallocate(&eigenvectors);
    bml_deallocate(&id);
    bml_free_memory(eigenvalues);

    LOG_INFO("diagonalize matrix test passed\n");

    return 0;
}
sam1.c
/* multithreaded quickSort

   usage with gcc (version 4.2 or higher required):
     gcc -O -fopenmp -o quickSort-openmp quickSort-openmp.c
     ./quickSort-openmp size numWorkers
*/

#include <omp.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>               /* time(NULL) used to seed rand(); was missing */

#define MAXSIZE 50000000        /* maximum array size */
#define MAXWORKERS 10           /* maximum number of workers */

void quickSort(int *inputArray, int size);
void swap(int *inputArray, int leftIndex, int rightIndex);

/* Lomuto partition of N[p..r] around the pivot N[r].
   Returns the final index of the pivot.  The pivot and temporaries are
   plain ints (the original used double, forcing pointless int<->float
   conversions on every comparison/swap). */
int partition(int *N, int p, int r)
{
    int key = N[r];
    int i = p - 1;
    int j;
    int temp;
    for (j = p; j < r; j++)
    {
        if (N[j] <= key)
        {
            i += 1;
            temp = N[i];
            N[i] = N[j];
            N[j] = temp;
        }
    }
    temp = N[i + 1];
    N[i + 1] = N[r];
    N[r] = temp;
    return i + 1;
}

/* Recursive quicksort of N[p..r].  The lower half is handed to an OpenMP
   task so idle threads in the team can pick it up; the current thread
   continues with the upper half.  All tasks are guaranteed complete at
   the implicit barrier that ends the enclosing parallel region. */
void quickSortHelper(int *N, int p, int r)
{
    if (p < r)
    {
        int q = partition(N, p, r);
#pragma omp task
        //if(rightIndex-leftIndex > 1000)
        {
            quickSortHelper(N, p, q - 1);
        }
        quickSortHelper(N, q + 1, r);
    }
}

/* Entry point: sorts N[0..n-1] in place. */
void sequentialQuickSort(int *N, int n)
{
    quickSortHelper(N, 0, n - 1);
}

double start_time, end_time, s_start_time, s_end_time;  /* start and end times */
long size;                      /* array size */

/* ---------------------------------------------------------------------------- */
/* read command line, initialize, and create threads */
int main(int argc, char *argv[])
{
    int i;

    /* read command line args if any */
    size = (argc > 1) ? atoi(argv[1]) : MAXSIZE;
    if (size > MAXSIZE)
    {
        size = MAXSIZE;
    }
    if (size < 2)
    {
        printf("Array is only one element!\n");
        return 0;               /* Invalid input array */
    }
    // numWorkers = (argc > 2)? atoi(argv[2]) : MAXWORKERS;
    // if (numWorkers > MAXWORKERS) numWorkers = MAXWORKERS;
    // omp_set_num_threads(numWorkers); /* Set number of threads */

    int *inputArray;            /* test data array */
    inputArray = malloc(sizeof(int) * size);    /* Allocate in memory instead */
    if (inputArray == NULL)
    {
        fprintf(stderr, "Failed to allocate %ld ints\n", size);
        return 1;
    }
    srand(time(NULL));
    /* Create testData array */
    for (i = 0; i < size; i++)
    {
        inputArray[i] = rand() % 99999;
    }

#ifdef DEBUG
    /* numWorkers is never assigned (the option parsing above is commented
       out), so the old "numWorkers" printf read an uninitialized variable
       and has been dropped. */
    printf("array size: %ld \n", size);
    printf("[ ");
    for (i = 0; i < size; i++)
    {
        printf(" %d", inputArray[i]);
    }
    printf(" ]\n");
#endif

    start_time = omp_get_wtime();
    /* Call the quickSort function to sort the list */
#pragma omp parallel
    {
        /* Only one thread starts the recursion; the tasks it spawns are
           executed by the rest of the team.  nowait is fine because the
           implicit barrier at the end of the parallel region already
           guarantees all tasks have finished. */
#pragma omp single nowait
        {
            sequentialQuickSort(inputArray, size);
        }
    }
    /* get end time */
    end_time = omp_get_wtime();

    /* verify the array is sorted; report any out-of-order pair */
    for (i = 0; i < size - 1; i++)
    {
        if (inputArray[i] > inputArray[i + 1])
        {
            fprintf(stderr, "Sort check FAILED at index %d (%d > %d)\n",
                    i, inputArray[i], inputArray[i + 1]);
        }
    }

    /*int myid = omp_get_thread_num(); */
    /* print results */
    // for (i = 0; i < size; i++) {
    //   inputArray[i] = rand()%99999;
    // }
    // s_start_time = omp_get_wtime();
    // sequentialQuickSort(inputArray, size);
    // s_end_time = omp_get_wtime();

#ifdef PRINT
    printf("[ ");
    for (i = 0; i < size; i++)
    {
        printf(" %d", inputArray[i]);
    }
    printf(" ]\n");
#endif

    free(inputArray);
    printf("The execution time is %g sec\n", end_time - start_time);
    return 0;
}

/* ---------------------------------------------------------------------------- */
/* Hoare-style partition around the middle element.  Returns a malloc'ed
   2-element array {rightIndex, leftIndex} delimiting the two halves;
   the caller owns (and must free) it. */
int *p(int *inputArray, int size)
{
    int pivot, leftIndex, rightIndex;
    /* Set pivot */
    pivot = inputArray[size / 2];
    for (leftIndex = 0, rightIndex = size - 1;; leftIndex++, rightIndex--)
    {
        while (inputArray[leftIndex] < pivot)
        {
            leftIndex++;
        }
        while (pivot < inputArray[rightIndex])
        {
            rightIndex--;
        }
        if (rightIndex <= leftIndex)
        {
            break;
        }
        swap(inputArray, leftIndex, rightIndex);
    }
    int *arr = malloc(sizeof(int) * 2);
    arr[0] = rightIndex;
    arr[1] = leftIndex;
    return arr;
}

/* Task-parallel quicksort built on p(); the lower part runs as an OpenMP
   task while the caller recurses into the upper part. */
void quickSort(int *inputArray, int size)
{
    if (size <= 1)
    {
        return;
    }
    int *arr = p(inputArray, size);
    int rightIndex = arr[0];
    int leftIndex = arr[1];
    free(arr);                  /* p() allocates; the original leaked one array per call */
#pragma omp task
    //if(rightIndex-leftIndex > 1000)
    {
        quickSort(inputArray, leftIndex);       /* Sort lower */
    }
    //#pragma omp task
    //{
    quickSort(inputArray + rightIndex + 1, size - rightIndex - 1);      /* Sort upper */
    //}
}

/* ---------------------------------------------------------------------------- */
/* Swaps two elements */
void swap(int *inputArray, int leftIndex, int rightIndex)
{
    int temp;
    temp = inputArray[leftIndex];
    inputArray[leftIndex] = inputArray[rightIndex];
    inputArray[rightIndex] = temp;
}
neon.h
#include <arm_neon.h>
#include <cassert>
#include "../../context.h"
#include "../../lsc.h"
#include "../../parallel.h"

// For 8 consecutive pixels, compute the candidate distance to `cluster`
// (SAD color distance + precomputed spatial distance) and merge it into the
// running minimum-distance / assignment vectors.
inline void get_assignment_value_vec(
        const Cluster* cluster,
        const uint8_t* img_quad_row,
        const uint16_t* spatial_dist_patch_row,
        const uint16_t* min_dist_row,
        const uint16_t* assignment_row,
        uint16x8_t cluster_number_vec,
        uint8x16_t cluster_color_vec,
        uint16x8_t& new_min_dist,
        uint16x8_t& new_assignment
        ) {
    uint16x8_t spatial_dist_vec = vld1q_u16(spatial_dist_patch_row);
    // 8 pixels * 4 bytes (RGBA quad) = 32 bytes, loaded as two 16-byte chunks
    uint8x16_t image_segment = vld1q_u8(img_quad_row);
    uint8x16_t image_segment_2 = vld1q_u8(img_quad_row + 16);
    // per-byte absolute difference against the broadcast cluster color
    uint8x16_t abs_segment = vabdq_u8(image_segment, cluster_color_vec);
    uint8x16_t abs_segment_2 = vabdq_u8(image_segment_2, cluster_color_vec);
    // two widening pairwise adds: 16 bytes -> 4 u32 sums (one per RGBA quad)
    uint32x4_t sad_segment = vpaddlq_u16(vpaddlq_u8(abs_segment));
    uint32x4_t sad_segment_2 = vpaddlq_u16(vpaddlq_u8(abs_segment_2));
    uint16x8_t color_dist_vec = vcombine_u16(vmovn_u32(sad_segment), vmovn_u32(sad_segment_2));
    uint16x8_t dist_vec = vaddq_u16(color_dist_vec, spatial_dist_vec);
    uint16x8_t old_assignment = vld1q_u16(assignment_row);
    uint16x8_t old_min_dist = vld1q_u16(min_dist_row);
    new_min_dist = vminq_u16(old_min_dist, dist_vec);
    // 0xFFFF if a[i+15:i] == b[i+15:i], 0x0000 otherwise.
    uint16x8_t mask = vceqq_u16(old_min_dist, new_min_dist);
    // if mask[i+15:i] is not zero, choose a[i+15:i], otherwise choose b[i+15:i]
    new_assignment = vbslq_u16(mask, old_assignment, cluster_number_vec);
}

namespace fslic {
    class Context_ARM_NEON : public ContextSIMD {
        using ContextSIMD::ContextSIMD;

        // Assign each pixel in every cluster's search patch to the closest
        // cluster seen so far, 8 pixels per NEON iteration with a scalar-ish
        // tail for the remaining (patch_width % 8) pixels.
        virtual void assign_clusters(const Cluster **target_clusters, int size) {
            for (int cidx = 0; cidx < size; cidx++) {
                const Cluster *cluster = target_clusters[cidx];
                uint16_t cluster_number = cluster->number;
                const uint16_t patch_width = spatial_dist_patch.get_width();
                // largest multiple of 8 not exceeding patch_width
                const uint16_t patch_width_multiple8 = patch_width & 0xFFF8;

                const int16_t cluster_y = cluster->y, cluster_x = cluster->x;
                const int16_t y_lo = cluster_y - S, x_lo = cluster_x - S;

                uint16x8_t cluster_number_vec = {
                    cluster_number, cluster_number, cluster_number, cluster_number,
                    cluster_number, cluster_number, cluster_number, cluster_number
                };

                // cluster color broadcast as four RGBA quads (alpha = 0)
                uint8x16_t cluster_color_vec = {
                    (uint8_t)cluster->r, (uint8_t)cluster->g, (uint8_t)cluster->b, 0,
                    (uint8_t)cluster->r, (uint8_t)cluster->g, (uint8_t)cluster->b, 0,
                    (uint8_t)cluster->r, (uint8_t)cluster->g, (uint8_t)cluster->b, 0,
                    (uint8_t)cluster->r, (uint8_t)cluster->g, (uint8_t)cluster->b, 0
                };

                int16_t patch_height = spatial_dist_patch.get_height();
                for (int16_t i = fit_to_stride(y_lo) - y_lo; i < patch_height; i += subsample_stride) {
                    const uint16_t* spatial_dist_patch_base_row = spatial_dist_patch.get_row(i);
                    const uint8_t *img_quad_base_row = quad_image.get_row(y_lo + i, 4 * x_lo);
                    uint16_t* assignment_base_row = assignment.get_row(i + y_lo, x_lo);
                    uint16_t* min_dist_base_row = min_dists.get_row(i + y_lo, x_lo);

#define ASSIGNMENT_VALUE_GETTER_BODY \
    uint16x8_t new_min_dist, new_assignment; \
    uint16_t* min_dist_row = min_dist_base_row + j; /* unaligned */ \
    uint16_t* assignment_row = assignment_base_row + j;  /* unaligned */ \
    const uint8_t* img_quad_row = img_quad_base_row + 4 * j; /*Image rows are not aligned due to x_lo*/ \
    const uint16_t* spatial_dist_patch_row = (uint16_t *)HINT_ALIGNED_AS(spatial_dist_patch_base_row + j, 16); /* Spatial distance patch is aligned */ \
    get_assignment_value_vec( \
        cluster, \
        img_quad_row, \
        spatial_dist_patch_row, \
        min_dist_row, \
        assignment_row, \
        cluster_number_vec, \
        cluster_color_vec, \
        new_min_dist, \
        new_assignment \
    );

                    // (16 + 16)(batch size) / 4(rgba quad) = stride 8
                    for (int j = 0; j < patch_width_multiple8; j += 8) {
                        ASSIGNMENT_VALUE_GETTER_BODY
                        vst1q_u16(min_dist_row, new_min_dist);
                        vst1q_u16(assignment_row, new_assignment);
                    }

                    // tail: 1..7 leftover pixels
                    if (0 < patch_width - patch_width_multiple8) {
                        int j = patch_width_multiple8;
                        int rem = patch_width - patch_width_multiple8;
                        ASSIGNMENT_VALUE_GETTER_BODY
                        uint16x4_t dist_4, assignment_4;
                        if (rem >= 4) {
                            // store the first 4 lanes, keep the high half for the rest
                            vst1_u16(&min_dist_base_row[j], vget_low_u16(new_min_dist));
                            vst1_u16(&assignment_base_row[j], vget_low_u16(new_assignment));
                            rem -= 4;
                            j += 4;
                            dist_4 = vget_high_u16(new_min_dist);
                            assignment_4 = vget_high_u16(new_assignment);
                        } else {
                            dist_4 = vget_low_u16(new_min_dist);
                            assignment_4 = vget_low_u16(new_assignment);
                        }
                        // write out the remaining 0..3 lanes one by one
                        switch (rem) {
                            case 3:
                                min_dist_base_row[j] = dist_4[0];
                                assignment_base_row[j] = assignment_4[0];
                                min_dist_base_row[j+1] = dist_4[1];
                                assignment_base_row[j+1] = assignment_4[1];
                                min_dist_base_row[j+2] = dist_4[2];
                                assignment_base_row[j+2] = assignment_4[2];
                                break;
                            case 2:
                                min_dist_base_row[j] = dist_4[0];
                                assignment_base_row[j] = assignment_4[0];
                                min_dist_base_row[j+1] = dist_4[1];
                                assignment_base_row[j+1] = assignment_4[1];
                                break;
                            case 1:
                                min_dist_base_row[j] = dist_4[0];
                                assignment_base_row[j] = assignment_4[0];
                                break;
                        }
                    }
                }
            }
        }
    };

    inline float32x4_t _float32x4_set1(float v) {
        float32x4_t result = {v, v, v, v};
        return result;
    }

    class ContextLSC_ARM_NEON : public ContextLSC {
    public:
        using ContextLSC::ContextLSC;
    protected:
        // 10-dimensional LSC feature distance, 4 pixels per NEON iteration.
        virtual void assign_clusters(const Cluster **target_clusters, int size) {
            const float* __restrict img_feats[10];
            const float* __restrict centroid_feats[10];

            for (int i = 0; i < 10; i++) {
                img_feats[i] = &image_features[i][0];
                centroid_feats[i] = &centroid_features[i][0];
            }

            for (int cidx = 0; cidx < size; cidx++) {
                const Cluster* cluster = target_clusters[cidx];
                int cluster_y = cluster->y, cluster_x = cluster->x;
                uint16_t cluster_no = cluster->number;

                int y_lo = my_max<int>(cluster_y - S, 0), y_hi = my_min<int>(cluster_y + S + 1, H);
                int x_lo = my_max<int>(cluster_x - S, 0), x_hi = my_min<int>(cluster_x + S + 1, W);

                uint16x4_t cluster_number_vec = {cluster_no, cluster_no, cluster_no, cluster_no};
                // broadcast the 10 centroid feature components
                float32x4_t c_0 = _float32x4_set1(centroid_feats[0][cluster_no]);
                float32x4_t c_1 = _float32x4_set1(centroid_feats[1][cluster_no]);
                float32x4_t c_2 = _float32x4_set1(centroid_feats[2][cluster_no]);
                float32x4_t c_3 = _float32x4_set1(centroid_feats[3][cluster_no]);
                float32x4_t c_4 = _float32x4_set1(centroid_feats[4][cluster_no]);
                float32x4_t c_5 = _float32x4_set1(centroid_feats[5][cluster_no]);
                float32x4_t c_6 = _float32x4_set1(centroid_feats[6][cluster_no]);
                float32x4_t c_7 = _float32x4_set1(centroid_feats[7][cluster_no]);
                float32x4_t c_8 = _float32x4_set1(centroid_feats[8][cluster_no]);
                float32x4_t c_9 = _float32x4_set1(centroid_feats[9][cluster_no]);

                for (int i = y_lo; i < y_hi; i++) {
                    if (!valid_subsample_row(i)) continue;
                    for (int j = x_lo; j < x_hi; j += 4) {
                        float* __restrict min_dist_row = min_dists.get_row(i, j);
                        uint16_t* __restrict assignment_row = assignment.get_row(i, j);

                        int index = W * i + j;
                        float32x4_t f_0 = vld1q_f32(&img_feats[0][index]);
                        float32x4_t d_0 = vsubq_f32(f_0, c_0);
                        float32x4_t f_1 = vld1q_f32(&img_feats[1][index]);
                        float32x4_t d_1 = vsubq_f32(f_1, c_1);
                        float32x4_t f_2 = vld1q_f32(&img_feats[2][index]);
                        float32x4_t d_2 = vsubq_f32(f_2, c_2);
                        float32x4_t f_3 = vld1q_f32(&img_feats[3][index]);
                        float32x4_t d_3 = vsubq_f32(f_3, c_3);
                        float32x4_t f_4 = vld1q_f32(&img_feats[4][index]);
                        float32x4_t d_4 = vsubq_f32(f_4, c_4);
                        float32x4_t f_5 = vld1q_f32(&img_feats[5][index]);
                        float32x4_t d_5 = vsubq_f32(f_5, c_5);
                        float32x4_t f_6 = vld1q_f32(&img_feats[6][index]);
                        float32x4_t d_6 = vsubq_f32(f_6, c_6);
                        float32x4_t f_7 = vld1q_f32(&img_feats[7][index]);
                        float32x4_t d_7 = vsubq_f32(f_7, c_7);
                        float32x4_t f_8 = vld1q_f32(&img_feats[8][index]);
                        float32x4_t d_8 = vsubq_f32(f_8, c_8);
                        float32x4_t f_9 = vld1q_f32(&img_feats[9][index]);
                        float32x4_t d_9 = vsubq_f32(f_9, c_9);

                        // squared L2 distance over the 10 features; each d_k
                        // is accumulated exactly once (a duplicated
                        // vmlaq_f32(dist_vec, d_4, d_4) previously counted
                        // feature 4 twice, skewing the distance)
                        float32x4_t dist_vec = vmulq_f32(d_0, d_0);
                        dist_vec = vmlaq_f32(dist_vec, d_1, d_1);
                        dist_vec = vmlaq_f32(dist_vec, d_2, d_2);
                        dist_vec = vmlaq_f32(dist_vec, d_3, d_3);
                        dist_vec = vmlaq_f32(dist_vec, d_4, d_4);
                        dist_vec = vmlaq_f32(dist_vec, d_5, d_5);
                        dist_vec = vmlaq_f32(dist_vec, d_6, d_6);
                        dist_vec = vmlaq_f32(dist_vec, d_7, d_7);
                        dist_vec = vmlaq_f32(dist_vec, d_8, d_8);
                        dist_vec = vmlaq_f32(dist_vec, d_9, d_9);

                        float32x4_t old_min_dist = vld1q_f32(min_dist_row);
                        uint16x4_t old_assignment = vld1_u16(assignment_row);
                        float32x4_t new_min_dist = vminq_f32(old_min_dist, dist_vec);
                        // 0xFFFF if a[i+15:i] == b[i+15:i], 0x0000 otherwise.
                        uint16x4_t mask = vmovn_u32(vceqq_f32(old_min_dist, new_min_dist));
                        // if mask[i+15:i] is not zero, choose a[i+15:i], otherwise choose b[i+15:i]
                        uint16x4_t new_assignment = vbsl_u16(mask, old_assignment, cluster_number_vec);

                        int rem = x_hi - j;
                        if (rem >= 4) {
                            vst1_u16(assignment_row, new_assignment);
                            vst1q_f32(min_dist_row, new_min_dist);
                        } else {
                            // partial tail: spill to stack, copy rem lanes
                            uint16_t arr_assignment[4];
                            float arr_dist[4];
                            vst1_u16(arr_assignment, new_assignment);
                            vst1q_f32(arr_dist, new_min_dist);
                            for (int delta = 0; delta < rem; delta++) {
                                assignment_row[delta] = arr_assignment[delta];
                                min_dist_row[delta] = arr_dist[delta];
                            }
                        }
                    }
                }
            }
        }

        // Divide the 10 feature accumulators by their weights, 4 at a time.
        // NOTE(review): vrecpeq_f32 is only an ~8-bit reciprocal estimate
        // (no Newton-Raphson refinement step); confirm that accuracy is
        // acceptable here, or add vrecpsq_f32 refinement / a true division.
        virtual void normalize_features(float * __restrict numers[10], float* __restrict weights, int size) {
            #pragma omp parallel for num_threads(fsparallel::nth())
            for (int i = 0; i < size; i += 4) {
                float32x4_t reciprocal_w = vrecpeq_f32(vld1q_f32(&weights[i]));
                vst1q_f32(&numers[0][i], vmulq_f32(vld1q_f32(&numers[0][i]), reciprocal_w));
                vst1q_f32(&numers[1][i], vmulq_f32(vld1q_f32(&numers[1][i]), reciprocal_w));
                vst1q_f32(&numers[2][i], vmulq_f32(vld1q_f32(&numers[2][i]), reciprocal_w));
                vst1q_f32(&numers[3][i], vmulq_f32(vld1q_f32(&numers[3][i]), reciprocal_w));
                vst1q_f32(&numers[4][i], vmulq_f32(vld1q_f32(&numers[4][i]), reciprocal_w));
                vst1q_f32(&numers[5][i], vmulq_f32(vld1q_f32(&numers[5][i]), reciprocal_w));
                vst1q_f32(&numers[6][i], vmulq_f32(vld1q_f32(&numers[6][i]), reciprocal_w));
                vst1q_f32(&numers[7][i], vmulq_f32(vld1q_f32(&numers[7][i]), reciprocal_w));
                vst1q_f32(&numers[8][i], vmulq_f32(vld1q_f32(&numers[8][i]), reciprocal_w));
                vst1q_f32(&numers[9][i], vmulq_f32(vld1q_f32(&numers[9][i]), reciprocal_w));
            }
        }
    };
} // namespace fslic
GB_unaryop__minv_uint16_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__minv_uint16_uint16
// op(A') function: GB_tran__minv_uint16_uint16

// C type: uint16_t
// A type: uint16_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 16)
// (GB_IMINV_UNSIGNED is the unsigned integer multiplicative-inverse macro
//  defined in GB.h; its exact semantics are not visible in this file.)

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 16) ;

// casting
#define GB_CASTING(z, x) \
    uint16_t z = (uint16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply over the anz entries of Ax, parallelized with a static
// OpenMP schedule (each entry is independent).
GrB_Info GB_unop__minv_uint16_uint16
(
    uint16_t *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is generated by including GB_unaryop_transpose.c,
// which expands using the GB_* macros defined above.
GrB_Info GB_tran__minv_uint16_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
joseph3d_back_tof_sino_2.c
/** * @file joseph3d_back_tof_sino_2.c */ #include<stdio.h> #include<stdlib.h> #include<stdint.h> #include<math.h> #include<omp.h> #include "tof_utils.h" #include "ray_cube_intersection.h" /** @brief 3D sinogram tof joseph back projector * * All threads back project in one image using openmp's atomic add. * * @param xstart array of shape [3*nlors] with the coordinates of the start points of the LORs. * The start coordinates of the n-th LOR are at xstart[n*3 + i] with i = 0,1,2 * @param xend array of shape [3*nlors] with the coordinates of the end points of the LORs. * The start coordinates of the n-th LOR are at xstart[n*3 + i] with i = 0,1,2 * @param img array of shape [n0*n1*n2] containing the 3D image used for back projection (output). * The pixel [i,j,k] ist stored at [n1*n2*i + n2*j + k]. * @param img_origin array [x0_0,x0_1,x0_2] of coordinates of the center of the [0,0,0] voxel * @param voxsize array [vs0, vs1, vs2] of the voxel sizes * @param p array of length nlors with the values to be back projected * @param nlors number of geometrical LORs * @param img_dim array with dimensions of image [n0,n1,n2] * @param tofbin_width width of the TOF bins in spatial units (units of xstart and xend) * @param sigma_tof array of length nlors with the TOF resolution (sigma) for each LOR in * spatial units (units of xstart and xend) * @param tofcenter_offset array of length nlors with the offset of the central TOF bin from the * midpoint of each LOR in spatial units (units of xstart and xend) * @param n_sigmas number of sigmas to consider for calculation of TOF kernel * @param n_tofbins number of TOF bins */ void joseph3d_back_tof_sino_2(const float *xstart, const float *xend, float *img, const float *img_origin, const float *voxsize, const float *p, long long nlors, const int *img_dim, float tofbin_width, const float *sigma_tof, const float *tofcenter_offset, float n_sigmas, short n_tofbins) { long long i; int n0 = img_dim[0]; int n1 = img_dim[1]; int n2 = img_dim[2]; int 
n_half = n_tofbins/2; # pragma omp parallel for schedule(static) for(i = 0; i < nlors; i++) { float d0, d1, d2, d0_sq, d1_sq, d2_sq; float cs0, cs1, cs2, cf; float lsq, cos0_sq, cos1_sq, cos2_sq; unsigned short direction; int i0, i1, i2; int i0_floor, i1_floor, i2_floor; int i0_ceil, i1_ceil, i2_ceil; float x_pr0, x_pr1, x_pr2; float tmp_0, tmp_1, tmp_2; float u0, u1, u2, d_norm; float x_m0, x_m1, x_m2; float x_v0, x_v1, x_v2; int it, it1, it2; float dtof, tw; float sig_tof = sigma_tof[i]; float tc_offset = tofcenter_offset[i]; float xstart0 = xstart[i*3 + 0]; float xstart1 = xstart[i*3 + 1]; float xstart2 = xstart[i*3 + 2]; float xend0 = xend[i*3 + 0]; float xend1 = xend[i*3 + 1]; float xend2 = xend[i*3 + 2]; float voxsize0 = voxsize[0]; float voxsize1 = voxsize[1]; float voxsize2 = voxsize[2]; float img_origin0 = img_origin[0]; float img_origin1 = img_origin[1]; float img_origin2 = img_origin[2]; unsigned char intersec; float t1, t2; float istart_f, iend_f, tmp; int istart, iend; float istart_tof_f, iend_tof_f; int istart_tof, iend_tof; // test whether the ray between the two detectors is most parallel // with the 0, 1, or 2 axis d0 = xend0 - xstart0; d1 = xend1 - xstart1; d2 = xend2 - xstart2; //----------- //--- test whether ray and cube intersect intersec = ray_cube_intersection(xstart0, xstart1, xstart2, img_origin0 - 1*voxsize0, img_origin1 - 1*voxsize1, img_origin2 - 1*voxsize2, img_origin0 + n0*voxsize0, img_origin1 + n1*voxsize1, img_origin2 + n2*voxsize2, d0, d1, d2, &t1, &t2); if (intersec == 1) { d0_sq = d0*d0; d1_sq = d1*d1; d2_sq = d2*d2; lsq = d0_sq + d1_sq + d2_sq; cos0_sq = d0_sq / lsq; cos1_sq = d1_sq / lsq; cos2_sq = d2_sq / lsq; cs0 = sqrtf(cos0_sq); cs1 = sqrtf(cos1_sq); cs2 = sqrtf(cos2_sq); direction = 0; if ((cos1_sq >= cos0_sq) && (cos1_sq >= cos2_sq)) { direction = 1; } if ((cos2_sq >= cos0_sq) && (cos2_sq >= cos1_sq)) { direction = 2; } //--------------------------------------------------------- //--- calculate TOF related quantities // 
unit vector (u0,u1,u2) that points from xstart to end d_norm = sqrtf(lsq); u0 = d0 / d_norm; u1 = d1 / d_norm; u2 = d2 / d_norm; // calculate mid point of LOR x_m0 = 0.5f*(xstart0 + xend0); x_m1 = 0.5f*(xstart1 + xend1); x_m2 = 0.5f*(xstart2 + xend2); //--------------------------------------------------------- if(direction == 0) { // case where ray is most parallel to the 0 axis // we step through the volume along the 0 direction // factor for correctiong voxel size and |cos(theta)| cf = voxsize0/cs0; //--- check where ray enters / leaves cube istart_f = (xstart0 + t1*d0 - img_origin0) / voxsize0; iend_f = (xstart0 + t2*d0 - img_origin0) / voxsize0; if (istart_f > iend_f){ tmp = iend_f; iend_f = istart_f; istart_f = tmp; } istart = (int)floor(istart_f); iend = (int)ceil(iend_f); if (istart < 0){istart = 0;} if (iend >= n0){iend = n0;} //--- for(i0 = istart; i0 < iend; i0++) { // get the indices where the ray intersects the image plane x_pr1 = xstart1 + (img_origin0 + i0*voxsize0 - xstart0)*d1 / d0; x_pr2 = xstart2 + (img_origin0 + i0*voxsize0 - xstart0)*d2 / d0; i1_floor = (int)floor((x_pr1 - img_origin1)/voxsize1); i1_ceil = i1_floor + 1; i2_floor = (int)floor((x_pr2 - img_origin2)/voxsize2); i2_ceil = i2_floor + 1; // calculate the distances to the floor normalized to [0,1] // for the bilinear interpolation tmp_1 = (x_pr1 - (i1_floor*voxsize1 + img_origin1)) / voxsize1; tmp_2 = (x_pr2 - (i2_floor*voxsize2 + img_origin2)) / voxsize2; //--------- TOF related quantities // calculate the voxel center needed for TOF weights x_v0 = img_origin0 + i0*voxsize0; x_v1 = x_pr1; x_v2 = x_pr2; it1 = -n_half; it2 = n_half; // get the relevant tof bins (the TOF bins where the TOF weight is not close to 0) relevant_tof_bins(x_m0, x_m1, x_m2, x_v0, x_v1, x_v2, u0, u1, u2, tofbin_width, tc_offset, sig_tof, n_sigmas, n_half, &it1, &it2); for(it = it1; it <= it2; it++){ //--- add extra check to be compatible with behavior of LM projector istart_tof_f = (x_m0 + (it*tofbin_width - 
n_sigmas*sig_tof)*u0 - img_origin0) / voxsize0; iend_tof_f = (x_m0 + (it*tofbin_width + n_sigmas*sig_tof)*u0 - img_origin0) / voxsize0; if (istart_tof_f > iend_tof_f){ tmp = iend_tof_f; iend_tof_f = istart_tof_f; istart_tof_f = tmp; } istart_tof = (int)floor(istart_tof_f); iend_tof = (int)ceil(iend_tof_f); //--- if ((i0 >= istart_tof) && (i0 < iend_tof)){ if(p[i*n_tofbins + it + n_half] != 0){ // calculate distance of voxel to tof bin center dtof = sqrtf(powf((x_m0 + (it*tofbin_width + tc_offset)*u0 - x_v0), 2) + powf((x_m1 + (it*tofbin_width + tc_offset)*u1 - x_v1), 2) + powf((x_m2 + (it*tofbin_width + tc_offset)*u2 - x_v2), 2)); //calculate the TOF weight tw = 0.5f*(erff_as((dtof + 0.5f*tofbin_width)/(sqrtf(2)*sig_tof)) - erff_as((dtof - 0.5f*tofbin_width)/(sqrtf(2)*sig_tof))); if ((i1_floor >= 0) && (i1_floor < n1) && (i2_floor >= 0) && (i2_floor < n2)) { #pragma omp atomic img[n1*n2*i0 + n2*i1_floor + i2_floor] += (tw * p[i*n_tofbins + it + n_half] * (1 - tmp_1) * (1 - tmp_2) * cf); } if ((i1_ceil >= 0) && (i1_ceil < n1) && (i2_floor >= 0) && (i2_floor < n2)) { #pragma omp atomic img[n1*n2*i0 + n2*i1_ceil + i2_floor] += (tw * p[i*n_tofbins + it + n_half] * tmp_1 * (1 - tmp_2) * cf); } if ((i1_floor >= 0) && (i1_floor < n1) && (i2_ceil >= 0) && (i2_ceil < n2)) { #pragma omp atomic img[n1*n2*i0 + n2*i1_floor + i2_ceil] += (tw * p[i*n_tofbins + it + n_half] * (1 - tmp_1) * tmp_2*cf); } if ((i1_ceil >= 0) && (i1_ceil < n1) && (i2_ceil >= 0) && (i2_ceil < n2)) { #pragma omp atomic img[n1*n2*i0 + n2*i1_ceil + i2_ceil] += (tw * p[i*n_tofbins + it + n_half] * tmp_1 * tmp_2 * cf); } } } } } } // --------------------------------------------------------------------------------- if(direction == 1) { // case where ray is most parallel to the 1 axis // we step through the volume along the 1 direction // factor for correctiong voxel size and |cos(theta)| cf = voxsize1/cs1; //--- check where ray enters / leaves cube istart_f = (xstart1 + t1*d1 - img_origin1) / voxsize1; iend_f 
= (xstart1 + t2*d1 - img_origin1) / voxsize1; if (istart_f > iend_f){ tmp = iend_f; iend_f = istart_f; istart_f = tmp; } istart = (int)floor(istart_f); iend = (int)ceil(iend_f); if (istart < 0){istart = 0;} if (iend >= n1){iend = n1;} //--- for(i1 = istart; i1 < iend; i1++) { // get the indices where the ray intersects the image plane x_pr0 = xstart0 + (img_origin1 + i1*voxsize1 - xstart1)*d0 / d1; x_pr2 = xstart2 + (img_origin1 + i1*voxsize1 - xstart1)*d2 / d1; i0_floor = (int)floor((x_pr0 - img_origin0)/voxsize0); i0_ceil = i0_floor + 1; i2_floor = (int)floor((x_pr2 - img_origin2)/voxsize2); i2_ceil = i2_floor + 1; // calculate the distances to the floor normalized to [0,1] // for the bilinear interpolation tmp_0 = (x_pr0 - (i0_floor*voxsize0 + img_origin0)) / voxsize0; tmp_2 = (x_pr2 - (i2_floor*voxsize2 + img_origin2)) / voxsize2; //--------- TOF related quantities // calculate the voxel center needed for TOF weights x_v0 = x_pr0; x_v1 = img_origin1 + i1*voxsize1; x_v2 = x_pr2; it1 = -n_half; it2 = n_half; // get the relevant tof bins (the TOF bins where the TOF weight is not close to 0) relevant_tof_bins(x_m0, x_m1, x_m2, x_v0, x_v1, x_v2, u0, u1, u2, tofbin_width, tc_offset, sig_tof, n_sigmas, n_half, &it1, &it2); for(it = it1; it <= it2; it++){ //--- add extra check to be compatible with behavior of LM projector istart_tof_f = (x_m1 + (it*tofbin_width - n_sigmas*sig_tof)*u1 - img_origin1) / voxsize1; iend_tof_f = (x_m1 + (it*tofbin_width + n_sigmas*sig_tof)*u1 - img_origin1) / voxsize1; if (istart_tof_f > iend_tof_f){ tmp = iend_tof_f; iend_tof_f = istart_tof_f; istart_tof_f = tmp; } istart_tof = (int)floor(istart_tof_f); iend_tof = (int)ceil(iend_tof_f); //--- if ((i1 >= istart_tof) && (i1 < iend_tof)){ if(p[i*n_tofbins + it + n_half] != 0){ // calculate distance of voxel to tof bin center dtof = sqrtf(powf((x_m0 + (it*tofbin_width + tc_offset)*u0 - x_v0), 2) + powf((x_m1 + (it*tofbin_width + tc_offset)*u1 - x_v1), 2) + powf((x_m2 + (it*tofbin_width + 
tc_offset)*u2 - x_v2), 2)); //calculate the TOF weight tw = 0.5f*(erff_as((dtof + 0.5f*tofbin_width)/(sqrtf(2)*sig_tof)) - erff_as((dtof - 0.5f*tofbin_width)/(sqrtf(2)*sig_tof))); if ((i0_floor >= 0) && (i0_floor < n0) && (i2_floor >= 0) && (i2_floor < n2)) { #pragma omp atomic img[n1*n2*i0_floor + n2*i1 + i2_floor] += (tw * p[i*n_tofbins + it + n_half] * (1 - tmp_0) * (1 - tmp_2) * cf); } if ((i0_ceil >= 0) && (i0_ceil < n0) && (i2_floor >= 0) && (i2_floor < n2)) { #pragma omp atomic img[n1*n2*i0_ceil + n2*i1 + i2_floor] += (tw * p[i*n_tofbins + it + n_half] * tmp_0 * (1 - tmp_2) * cf); } if ((i0_floor >= 0) && (i0_floor < n0) && (i2_ceil >= 0) && (i2_ceil < n2)) { #pragma omp atomic img[n1*n2*i0_floor + n2*i1 + i2_ceil] += (tw * p[i*n_tofbins + it + n_half] * (1 - tmp_0) * tmp_2 * cf); } if((i0_ceil >= 0) && (i0_ceil < n0) && (i2_ceil >= 0) && (i2_ceil < n2)) { #pragma omp atomic img[n1*n2*i0_ceil + n2*i1 + i2_ceil] += (tw * p[i*n_tofbins + it + n_half] * tmp_0 * tmp_2 * cf); } } } } } } //--------------------------------------------------------------------------------- if (direction == 2) { // case where ray is most parallel to the 2 axis // we step through the volume along the 2 direction // factor for correctiong voxel size and |cos(theta)| cf = voxsize2/cs2; //--- check where ray enters / leaves cube istart_f = (xstart2 + t1*d2 - img_origin2) / voxsize2; iend_f = (xstart2 + t2*d2 - img_origin2) / voxsize2; if (istart_f > iend_f){ tmp = iend_f; iend_f = istart_f; istart_f = tmp; } istart = (int)floor(istart_f); iend = (int)ceil(iend_f); if (istart < 0){istart = 0;} if (iend >= n2){iend = n2;} //--- for(i2 = istart; i2 < iend; i2++) { // get the indices where the ray intersects the image plane x_pr0 = xstart0 + (img_origin2 + i2*voxsize2 - xstart2)*d0 / d2; x_pr1 = xstart1 + (img_origin2 + i2*voxsize2 - xstart2)*d1 / d2; i0_floor = (int)floor((x_pr0 - img_origin0)/voxsize0); i0_ceil = i0_floor + 1; i1_floor = (int)floor((x_pr1 - img_origin1)/voxsize1); i1_ceil 
= i1_floor + 1; // calculate the distances to the floor normalized to [0,1] // for the bilinear interpolation tmp_0 = (x_pr0 - (i0_floor*voxsize0 + img_origin0)) / voxsize0; tmp_1 = (x_pr1 - (i1_floor*voxsize1 + img_origin1)) / voxsize1; //--------- TOF related quantities // calculate the voxel center needed for TOF weights x_v0 = x_pr0; x_v1 = x_pr1; x_v2 = img_origin2 + i2*voxsize2; it1 = -n_half; it2 = n_half; // get the relevant tof bins (the TOF bins where the TOF weight is not close to 0) relevant_tof_bins(x_m0, x_m1, x_m2, x_v0, x_v1, x_v2, u0, u1, u2, tofbin_width, tc_offset, sig_tof, n_sigmas, n_half, &it1, &it2); for(it = it1; it <= it2; it++){ //--- add extra check to be compatible with behavior of LM projector istart_tof_f = (x_m2 + (it*tofbin_width - n_sigmas*sig_tof)*u2 - img_origin2) / voxsize2; iend_tof_f = (x_m2 + (it*tofbin_width + n_sigmas*sig_tof)*u2 - img_origin2) / voxsize2; if (istart_tof_f > iend_tof_f){ tmp = iend_tof_f; iend_tof_f = istart_tof_f; istart_tof_f = tmp; } istart_tof = (int)floor(istart_tof_f); iend_tof = (int)ceil(iend_tof_f); //--- if ((i2 >= istart_tof) && (i2 < iend_tof)){ if(p[i*n_tofbins + it + n_half] != 0){ // calculate distance of voxel to tof bin center dtof = sqrtf(powf((x_m0 + (it*tofbin_width + tc_offset)*u0 - x_v0), 2) + powf((x_m1 + (it*tofbin_width + tc_offset)*u1 - x_v1), 2) + powf((x_m2 + (it*tofbin_width + tc_offset)*u2 - x_v2), 2)); //calculate the TOF weight tw = 0.5f*(erff_as((dtof + 0.5f*tofbin_width)/(sqrtf(2)*sig_tof)) - erff_as((dtof - 0.5f*tofbin_width)/(sqrtf(2)*sig_tof))); if ((i0_floor >= 0) && (i0_floor < n0) && (i1_floor >= 0) && (i1_floor < n1)) { #pragma omp atomic img[n1*n2*i0_floor + n2*i1_floor + i2] += (tw * p[i*n_tofbins + it + n_half] * (1 - tmp_0) * (1 - tmp_1) * cf); } if ((i0_ceil >= 0) && (i0_ceil < n0) && (i1_floor >= 0) && (i1_floor < n1)) { #pragma omp atomic img[n1*n2*i0_ceil + n2*i1_floor + i2] += (tw * p[i*n_tofbins + it + n_half] * tmp_0 * (1 - tmp_1) * cf); } if ((i0_floor >= 
0) && (i0_floor < n0) && (i1_ceil >= 0) && (i1_ceil < n1)) { #pragma omp atomic img[n1*n2*i0_floor + n2*i1_ceil + i2] += (tw * p[i*n_tofbins + it + n_half] * (1 - tmp_0) * tmp_1 * cf); } if ((i0_ceil >= 0) && (i0_ceil < n0) && (i1_ceil >= 0) && (i1_ceil < n1)) { #pragma omp atomic img[n1*n2*i0_ceil + n2*i1_ceil + i2] += (tw * p[i*n_tofbins + it + n_half] * tmp_0 * tmp_1 * cf); } } } } } } } } }
conv_kernel_mips.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 */

/*
 * im2col + 4x4-packed SGEMM convolution kernels for MIPS, with optional
 * MSA SIMD paths (#if __mips_msa) and OpenMP threading over output rows.
 */

#include "conv_kernel_mips.h"
#include "wino_conv_kernel_mips.h"

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <math.h>

#if __mips_msa
#include <msa.h>
#endif

#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))

/* Bytes needed for the raw (un-packed) interleave buffer: one copy of the filter. */
static int get_private_mem_size(struct tensor* filter)
{
    return filter->elem_num * filter->elem_size; // caution
}

/* Copy the filter verbatim into the interleave buffer (no reordering here;
 * the pack4 reordering happens later in conv_hcl_interleave_pack4). */
static void interleave(struct tensor* filter, struct conv_priv_info* priv_info)
{
    /* simply copy the data */
    memcpy(priv_info->interleave_buffer, filter->data, filter->elem_num * filter->elem_size);
}

/* Expand one input image (inc x inh x inw) into the im2col matrix data_col of
 * shape [ksize_h*ksize_w*inc] x [outh*outw], zero-filling out-of-bounds taps.
 * NOTE(review): the `outc` parameter is never read in this body. */
void im2col(float* data_img, float* data_col, int inh, int inw, int inc, int outh, int outw, int outc, int ksize_h,
            int ksize_w, int sh, int sw, int ph, int pw, int dh, int dw)
{
    const int channels_col = ksize_h * ksize_w * inc;
    for (int c = 0; c < channels_col; ++c)
    {
        /* decompose the row index c into (kernel-x, kernel-y, input-channel) */
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;
        const int im_col = kw * dw - pw;
        /* [w_low, w_high) is the range of output columns whose sampled input
         * column lies inside the image; outside it we write zeros (padding). */
        const int w_low = max(0, -im_col / sw + (-im_col % sw > 0));
        const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0));
        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            float* out = data_col + (c * outh + h) * outw;
            const float* end = out + w_high;
            if (im_row >= 0 && im_row < inh)
            {
                /* start one stride before the first in-bounds sample; the loop
                 * pre-increments `in` before each read */
                float* in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw;
                memset(out, 0, w_low * sizeof(float));
                out += w_low;
                while (out < end)
                {
                    in += sw;
                    *(out++) = *in;
                }
                memset(out, 0, (outw - w_high) * sizeof(float));
            }
            else
            {
                /* whole row falls into vertical padding */
                memset(out, 0, outw * sizeof(float));
            }
        }
    }
}

/* Run im2col for batch image n / group `group` into the shared im2col buffer.
 * NOTE(review): `input_zero` is computed for UINT8 inputs but never used —
 * presumably a leftover from a quantized path; confirm before relying on it. */
static void im2col_ir(struct tensor* input, struct tensor* output, struct conv_priv_info* priv_info,
                      struct conv_param* param, int n, int group)
{
    int input_chan = param->input_channel / param->group;
    int image_size = input->dims[1] * input->dims[2] * input->dims[3];
    int group_size = input_chan * input->dims[2] * input->dims[3];
    void* input_base = input->data + (n * image_size + group * group_size) * input->elem_size;
    void* im2col_buf = priv_info->im2col_buffer;
    int input_zero = 0;
    if (input->data_type == TENGINE_DT_UINT8)
        input_zero = input->zero_point;
    im2col(input_base, im2col_buf, input->dims[2], input->dims[3], input_chan, output->dims[2], output->dims[3],
           output->dims[1] / param->group, param->kernel_h, param->kernel_w, param->stride_h, param->stride_w,
           param->pad_h0, param->pad_w0, param->dilation_h, param->dilation_w);
}

/* Repack the K x N im2col matrix pB column-blocked by 4 into pB_t so that the
 * SGEMM inner loop reads 4 consecutive columns contiguously. Remainder columns
 * (N % 4) are laid out one-by-one at offset (i/4 + i%4)*4*K, matching sgemm. */
void input_pack4(int K, int N, float* pB, float* pB_t, int num_thread)
{
    int nn_size = N >> 2;
    int remian_size_start = nn_size << 2;

    // [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....]
#pragma omp parallel for num_threads(num_thread)
    for (int ii = 0; ii < nn_size; ii++)
    {
        int i = ii * 4;
        const float* img = pB + i;
        float* tmp = pB_t + (i / 4) * 4 * K;
        for (int j = 0; j < K; j++)
        {
#if __mips_msa
            __msa_st_w(__msa_ld_w(img, 0), tmp, 0);
#else
            tmp[0] = img[0];
            tmp[1] = img[1];
            tmp[2] = img[2];
            tmp[3] = img[3];
#endif // __mips_msa
            tmp += 4;
            img += N;
        }
    }

    // [ch00, ch01, ch02, ch03 ....]
#pragma omp parallel for num_threads(num_thread)
    for (int i = remian_size_start; i < N; i++)
    {
        const float* img = pB + i;
        float* tmp = pB_t + (i / 4 + i % 4) * 4 * K;
        for (int j = 0; j < K; j++)
        {
            tmp[0] = img[0];
            tmp += 1;
            img += N;
        }
    }
}

// unloop output M, unloop N, packet 4x4, using intrinsic
/* C(MxN) = A(MxK) * B(KxN) where A is the pack4 filter (pA_t) and B is the
 * pack4 im2col matrix (pB_t). Rows are processed 4 at a time, then singly. */
static void sgemm(int M, int N, int K, float* pA_t, float* pB_t, float* pC, int num_thread)
{
    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = M >> 2;
    remain_outch_start = nn_outch << 2;

    // output ch0 - ch3
#pragma omp parallel for num_threads(num_thread)
    for (int pp=0; pp<nn_outch; pp++)
    {
        int i = pp * 4;

        float* output0 = pC + ( i )*N;
        float* output1 = pC + (i + 1) * N;
        float* output2 = pC + (i + 2) * N;
        float* output3 = pC + (i + 3) * N;

        int j = 0;
        /* 4 output rows x 4 output columns per iteration */
        for (; j + 3 < N; j += 4)
        {
            float* va = pA_t + (i / 4) * 4 * K;
            float* vb = pB_t + (j / 4) * 4 * K;
#if __mips_msa
            v4f32 _sum0 = {0.f};
            v4f32 _sum1 = {0.f};
            v4f32 _sum2 = {0.f};
            v4f32 _sum3 = {0.f};

            for (int k = 0; k < K; k++)
            {
                // k0
                __builtin_prefetch(vb + 32);
                __builtin_prefetch(va + 32);
                v4f32 _vb = (v4f32)__msa_ld_w(vb, 0);
                v4i32 _va0123 = __msa_ld_w(va, 0);
                _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb, (v4f32)__msa_splati_w(_va0123, 0)));    // sum0 = (a00-a03) * k00
                _sum1 = __msa_fadd_w(_sum1, __msa_fmul_w(_vb, (v4f32)__msa_splati_w(_va0123, 1)));    // sum1 = (a00-a03) * k10
                _sum2 = __msa_fadd_w(_sum2, __msa_fmul_w(_vb, (v4f32)__msa_splati_w(_va0123, 2)));    // sum2 = (a00-a03) * k20
                _sum3 = __msa_fadd_w(_sum3, __msa_fmul_w(_vb, (v4f32)__msa_splati_w(_va0123, 3)));    // sum3 = (a00-a03) * k30

                va += 4;
                vb += 4;
            }
            __msa_st_w((v4i32)_sum0, output0, 0);
            __msa_st_w((v4i32)_sum1, output1, 0);
            __msa_st_w((v4i32)_sum2, output2, 0);
            __msa_st_w((v4i32)_sum3, output3, 0);
#else
            float sum0[4] = {0};
            float sum1[4] = {0};
            float sum2[4] = {0};
            float sum3[4] = {0};

            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 4; n++)
                {
                    sum0[n] += va[0] * vb[n];
                    sum1[n] += va[1] * vb[n];
                    sum2[n] += va[2] * vb[n];
                    sum3[n] += va[3] * vb[n];
                }
                va += 4;
                vb += 4;
            }

            for (int n = 0; n < 4; n++)
            {
                output0[n] = sum0[n];
                output1[n] = sum1[n];
                output2[n] = sum2[n];
                output3[n] = sum3[n];
            }
#endif // __mips_msa
            output0 += 4;
            output1 += 4;
            output2 += 4;
            output3 += 4;
        }

        /* remainder: 4 output rows x 1 output column */
        for (; j < N; j++)
        {
            float* va = pA_t + (i / 4) * 4 * K;
            float* vb = pB_t + (j / 4 + j % 4) * 4 * K;
#if __mips_msa
            v4f32 _sum0_3 = {0.f};
            v4f32 _sum0 = {0.f};
            v4f32 _sum1 = {0.f};
            v4f32 _sum2 = {0.f};
            v4f32 _sum3 = {0.f};

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                __builtin_prefetch(vb + 32);
                __builtin_prefetch(va + 128);
                v4i32 _vb0123 = __msa_ld_w(vb, 0);
                v4f32 _va0 = (v4f32)__msa_ld_w(va, 0);
                v4f32 _va1 = (v4f32)__msa_ld_w(va + 4, 0);
                v4f32 _va2 = (v4f32)__msa_ld_w(va + 8, 0);
                v4f32 _va3 = (v4f32)__msa_ld_w(va + 12, 0);
                _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_va0, (v4f32)__msa_splati_w(_vb0123, 0)));    // sum0 += (k00-k30) * a00
                _sum1 = __msa_fadd_w(_sum1, __msa_fmul_w(_va1, (v4f32)__msa_splati_w(_vb0123, 1)));    // sum1 += (k01-k31) * a10
                _sum2 = __msa_fadd_w(_sum2, __msa_fmul_w(_va2, (v4f32)__msa_splati_w(_vb0123, 2)));    // sum2 += (k02-k32) * a20
                _sum3 = __msa_fadd_w(_sum3, __msa_fmul_w(_va3, (v4f32)__msa_splati_w(_vb0123, 3)));    // sum3 += (k03-k33) * a30

                va += 16;
                vb += 4;
            }

            _sum0 = __msa_fadd_w(_sum0, _sum1);
            _sum2 = __msa_fadd_w(_sum2, _sum3);
            _sum0_3 = __msa_fadd_w(_sum2, _sum0);
            // _sum0_3 = __msa_fadd_w(_sum0_3, _sum2);

            for (; k < K; k++)
            {
                /* explicit 4-lane splat of the single B value */
                v4f32 _vb0 = {vb[0], vb[0], vb[0], vb[0]};
                v4f32 _va = (v4f32)__msa_ld_w(va, 0);
                _sum0_3 = __msa_fadd_w(_sum0_3, __msa_fmul_w(_va, _vb0));    // sum0 += (k00-k30) * a00

                va += 4;
                vb += 1;
            }
            output0[0] = _sum0_3[0];
            output1[0] = _sum0_3[1];
            output2[0] = _sum0_3[2];
            output3[0] = _sum0_3[3];
#else
            float sum0 = 0;
            float sum1 = 0;
            float sum2 = 0;
            float sum3 = 0;

            for (int k = 0; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                sum1 += va[1] * vb[0];
                sum2 += va[2] * vb[0];
                sum3 += va[3] * vb[0];

                va += 4;
                vb += 1;
            }
            output0[0] = sum0;
            output1[0] = sum1;
            output2[0] = sum2;
            output3[0] = sum3;
#endif // __mips_msa
            output0++;
            output1++;
            output2++;
            output3++;
        }
    }

    // output ch0
    /* remaining single output rows (M % 4) */
#pragma omp parallel for num_threads(num_thread)
    for (int i=remain_outch_start; i<M; i++)
    {
        float* output = pC + i * N;

        int j = 0;
        for (; j + 3 < N; j += 4)
        {
            float* va = pA_t + (i / 4 + i % 4) * 4 * K;
            float* vb = pB_t + (j / 4) * 4 * K;
#if __mips_msa
            v4f32 _sum0 = {0.f};

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __builtin_prefetch(va + 32);
                __builtin_prefetch(vb + 128);
                v4i32 _va0123 = __msa_ld_w(va, 0);
                v4f32 _vb0 = (v4f32)__msa_ld_w(vb, 0);
                v4f32 _vb1 = (v4f32)__msa_ld_w(vb + 4, 0);
                v4f32 _vb2 = (v4f32)__msa_ld_w(vb + 8, 0);
                v4f32 _vb3 = (v4f32)__msa_ld_w(vb + 12, 0);

                _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb0, (v4f32)__msa_splati_w(_va0123, 0)));    // sum0 = (a00-a03) * k00
                _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb1, (v4f32)__msa_splati_w(_va0123, 1)));    // sum0 += (a10-a13) * k01
                _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb2, (v4f32)__msa_splati_w(_va0123, 2)));    // sum0 += (a20-a23) * k02
                _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb3, (v4f32)__msa_splati_w(_va0123, 3)));    // sum0 += (a30-a33) * k03

                va += 4;
                vb += 16;
            }

            for (; k < K; k++)
            {
                // k0
                /* NOTE(review): {va[0]} zero-fills lanes 1-3 (GCC vector-init
                 * semantics), unlike the explicit four-element splat used in
                 * the loop above — verify this should be a full splat of va[0]. */
                v4f32 _va0 = {va[0]};
                v4f32 _vb0 = (v4f32)__msa_ld_w(vb, 0);

                _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb0, _va0));    // sum0 = (a00-a03) * k00

                va += 1;
                vb += 4;
            }
            __msa_st_w((v4i32)_sum0, output, 0);
#else
            float sum[4] = {0};

            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 4; n++)
                {
                    sum[n] += va[0] * vb[n];
                }
                va += 1;
                vb += 4;
            }

            for (int n = 0; n < 4; n++)
            {
                output[n] = sum[n];
            }
#endif // __mips_msa
            output += 4;
        }

        /* 1 row x 1 column remainder: plain dot product */
        for (; j < N; j++)
        {
            float* va = pA_t + (i / 4 + i % 4) * 4 * K;
            float* vb = pB_t + (j / 4 + j % 4) * 4 * K;

            int k = 0;
#if __mips_msa
            v4f32 _sum0 = {0.f};

            for (; k + 3 < K; k += 4)
            {
                __builtin_prefetch(vb + 32);
                __builtin_prefetch(va + 32);
                v4f32 _p0 = (v4f32)__msa_ld_w(vb, 0);
                v4f32 _k0 = (v4f32)__msa_ld_w(va, 0);
                _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_p0, _k0));

                va += 4;
                vb += 4;
            }
            float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3];
#else
            float sum0 = 0.f;
#endif // __mips_msa
            for (; k < K; k++)
            {
                sum0 += va[0] * vb[0];

                va += 1;
                vb += 1;
            }
            output[0] = sum0;
            output++;
        }
    }
}

/* FP32 conv for one (batch n, group) pair: SGEMM then bias add and activation.
 * NOTE(review): activation convention appears to be 0 -> ReLU, >0 -> ReLU6,
 * anything else (e.g. -1) -> none — confirm against conv_param docs. */
static void sgemm_fp32(struct tensor* input, struct tensor* filter, struct tensor* bias, struct tensor* output,
                       struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread)
{
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;

    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int out_image_size = output->dims[1] * output->dims[2] * output->dims[3];

    /* per-group slices of the packed filter, packed input and the output image */
    float* interleave_fp32 = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size;
    float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4;
    float* output_fp32 = ( float* )output->data + n * out_image_size + outchan_g * group * out_h * out_w;
    float* bias_fp32 = NULL;

    if (bias)
        bias_fp32 = ( float* )bias->data + outchan_g * group;

    float* filter_sgemm = interleave_fp32;
    float* input_sgemm_pack4 = im2col_pack4_fp32;
    float* output_sgemm = output_fp32;

    sgemm(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread);

    // process bias
    if (bias)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                output_fp32[output_off] += bias_fp32[i];
            }
        }
    }

    // process activation relu
    if (param->activation == 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;

                if (output_fp32[output_off] < 0)
                    output_fp32[output_off] = 0;
            }
        }
    }

    // process activation relu6
    if (param->activation > 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;

                if (output_fp32[output_off] < 0)
                    output_fp32[output_off] = 0;
                if (output_fp32[output_off] > 6)
                    output_fp32[output_off] = 6;
            }
        }
    }
}

/* check the conv wheather need to be using winograd */
/* Winograd is used only for non-grouped 3x3 stride-1 dilation-1 convs with
 * at least 16 in/out channels on inputs larger than 10x10. */
static int winograd_support(struct conv_param* param, int in_h, int in_w)
{
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int input_chan = param->input_channel;
    int output_chan = param->output_channel;
    int group = param->group;

    if (in_h <= 10 && in_w <= 10)
        return 0;

    if (group != 1 || kernel_h != 3 || kernel_w != 3 || stride_h != 1 || stride_w != 1 || dilation_h != 1 ||
        dilation_w != 1 || input_chan < 16 || output_chan < 16)
        return 0;

    return 1;
}

/* Bytes required for the per-group im2col buffer. */
int conv_hcl_get_shared_mem_size(struct tensor* input, struct tensor* output, struct conv_param* param)
{
    int group = param->group;
    int input_chan = param->input_channel / group;
    int kernel_size = input_chan * param->kernel_h * param->kernel_w;
    int output_xy = output->dims[2] * output->dims[3];
    int elem_size = input->elem_size;

    return elem_size * output_xy * kernel_size;
}

/* Bytes required for the pack4 im2col buffer (N rounded up by the remainder layout). */
int conv_hcl_get_shared_pack4_mem_size(struct tensor* filter, struct tensor* output, struct conv_param* param)
{
    int K = filter->elem_num / filter->dims[0];
    int N = output->dims[2] * output->dims[3];
    int elem_size = filter->elem_size;

    return (4 * K * (N / 4 + N % 4)) * elem_size;
}

/* Bytes required for the pack4 filter buffer. */
int conv_hcl_get_interleave_pack4_size(int M, int K, struct tensor* filter)
{
    int size = 4 * K * (M / 4 + M % 4) * filter->elem_size;
    return size;
}

/* Reorder the plain interleave buffer (M x K, row major) into 4-row blocks,
 * mirroring the layout sgemm expects from pA_t. */
void conv_hcl_interleave_pack4(int M, int K, struct conv_priv_info* priv_info)
{
    float* pA = ( float* )priv_info->interleave_buffer;
    float* pA_t = ( float* )priv_info->interleave_buffer_pack4;

    int nn_outch = M >> 2;
    int remain_outch_start = nn_outch << 2;

    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 4;

        const float* k0 = pA + (p + 0) * K;
        const float* k1 = pA + (p + 1) * K;
        const float* k2 = pA + (p + 2) * K;
        const float* k3 = pA + (p + 3) * K;

        float* ktmp = pA_t + (p / 4) * 4 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }

    /* leftover rows stored singly, matching sgemm's (i/4 + i%4) addressing */
    for (int p = remain_outch_start; p < M; p++)
    {
        const float* k0 = pA + (p + 0) * K;

        float* ktmp = pA_t + (p / 4 + p % 4) * 4 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}

/* Allocate work buffers and pre-pack the filter. Delegates entirely to the
 * Winograd path when winograd_support() says so.
 * NOTE(review): the pack4 interleave buffer is (re)allocated when
 * external_interleave_pack4_mem is SET, which reads inverted compared to the
 * other flags — confirm this flag actually means "pack4 path enabled" here. */
int conv_hcl_prerun(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* output_tensor,
                    struct conv_priv_info* priv_info, struct conv_param* param)
{
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];

    /* check winograd implement, only for conv3x3s1 */
    priv_info->winograd = winograd_support(param, in_h, in_w);
    if (priv_info->winograd)
    {
        return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param);
    }

    if (!priv_info->external_im2col_mem)
    {
        int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->im2col_buffer = mem;
        priv_info->im2col_buffer_size = mem_size;
    }
    if (!priv_info->external_im2col_pack4_mem)
    {
        int mem_size = conv_hcl_get_shared_pack4_mem_size(filter_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->im2col_buffer_pack4 = mem;
        priv_info->im2col_buffer_pack4_size = mem_size;
    }
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    interleave(filter_tensor, priv_info);

    if (priv_info->external_interleave_pack4_mem)
    {
        int M = filter_tensor->dims[0];
        int K = filter_tensor->elem_num / filter_tensor->dims[0];
        int mem_size = conv_hcl_get_interleave_pack4_size(M, K, filter_tensor);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer_pack4 = mem;
        priv_info->interleave_buffer_pack4_size = mem_size;

        conv_hcl_interleave_pack4(M, K, priv_info);

        /* the un-packed copy is no longer needed once packed */
        if (!priv_info->external_interleave_mem && priv_info->interleave_buffer)
        {
            sys_free(priv_info->interleave_buffer);
            priv_info->interleave_buffer = NULL;
        }
    }

    return 0;
}

/* Free every internally-allocated buffer (externally supplied memory is left
 * alone). NOTE(review): the first branch guards on interleave_buffer != NULL
 * but frees interleave_buffer_pack4 — double free is prevented only by the
 * NULL assignment plus the later != NULL check; likely a typo worth verifying. */
int conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    if (priv_info->winograd)
    {
        return wino_conv_hcl_postrun(priv_info);
    }

    if (priv_info->external_interleave_pack4_mem && !priv_info->external_interleave_mem &&
        priv_info->interleave_buffer != NULL)
    {
        sys_free(priv_info->interleave_buffer_pack4);
        priv_info->interleave_buffer_pack4 = NULL;
    }

    if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL)
    {
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL;
    }

    if (!priv_info->external_im2col_pack4_mem && priv_info->im2col_buffer_pack4 != NULL)
    {
        sys_free(priv_info->im2col_buffer_pack4);
        priv_info->im2col_buffer_pack4 = NULL;
    }

    if (priv_info->external_interleave_pack4_mem && priv_info->interleave_buffer_pack4 != NULL)
    {
        sys_free(priv_info->interleave_buffer_pack4);
        priv_info->interleave_buffer_pack4 = NULL;
    }

    return 0;
}

/* Main entry: per batch image and per group, im2col -> pack4 -> sgemm_fp32.
 * Only TENGINE_DT_FP32 triggers the GEMM; other dtypes fall through silently. */
int conv_hcl_run(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* bias_tensor,
                 struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                 int num_thread, int cpu_affinity)
{
    int group = param->group;
    int type = input_tensor->data_type;

    if (priv_info->winograd)
    {
        return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread,
                                 cpu_affinity);
    }

    for (int i = 0; i < input_tensor->dims[0]; i++)    // batch size
    {
        for (int j = 0; j < group; j++)
        {
            im2col_ir(input_tensor, output_tensor, priv_info, param, i, j);

            int K = filter_tensor->elem_num / filter_tensor->dims[0];
            int N = output_tensor->dims[2] * output_tensor->dims[3];
            float* im2col_fp32 = priv_info->im2col_buffer;
            float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4;
            input_pack4(K, N, im2col_fp32, im2col_pack4_fp32, num_thread);

            if (type == TENGINE_DT_FP32)
                sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread);
        }
    }

    return 0;
}

/* Register caller-owned im2col memory; postrun will not free it. */
int conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->external_im2col_mem = 1;
    priv_info->im2col_buffer = mem;
    priv_info->im2col_buffer_size = mem_size;
    return 0;
}

/* Register caller-owned pack4 im2col memory; postrun will not free it. */
int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->external_im2col_pack4_mem = 1;
    priv_info->im2col_buffer_pack4 = mem;
    priv_info->im2col_buffer_pack4_size = mem_size;
    return 0;
}
vec_add_sections.c
/* sections Directive Example
 *
 * Demonstrates the OpenMP `sections` work-sharing construct: two independent
 * element-wise vector operations (add and multiply) are assigned to different
 * threads of the parallel team.
 *
 * Fixes: `main` had an implicit `int` return type (invalid since C99) and no
 * return statement.
 */
#include <omp.h>

#define N 1000

int main(int argc, char *argv[])
{
    int i;
    float a[N], b[N], c[N], d[N];

    /* Some initializations */
    for (i = 0; i < N; i++) {
        a[i] = i * 1.5;
        b[i] = i + 22.35;
    }

    /* `i` is private so each section's loop has its own counter;
     * `nowait` skips the barrier at the end of the sections region. */
#pragma omp parallel shared(a, b, c, d) private(i)
    {
#pragma omp sections nowait
        {
#pragma omp section
            for (i = 0; i < N; i++)
                c[i] = a[i] + b[i];

#pragma omp section
            for (i = 0; i < N; i++)
                d[i] = a[i] * b[i];
        } /* end of sections */
    } /* end of parallel region */

    return 0;
}
BLAS.h
//
// Created by kazem on 7/18/17.
//

#ifndef TRIANGOPENMP_BLAS_H
#define TRIANGOPENMP_BLAS_H

namespace nasoq {

/* Dense lower-triangular solve L*x = rhs (non-unit diagonal), solved in place
 * in rhs. M is column-major with leading dimension ldm; only the first ncol
 * columns/rows are used. Columns are processed in blocks of 8, then 4, 2, 1;
 * the Mki* pointers walk down each column via post-increment, so statement
 * order is load-bearing throughout. */
 void dlsolve_blas_nonUnit(int ldm, int ncol, double *M, double *rhs)//general triangular solver
 {
  int k;
  double x0, x1, x2, x3, x4, x5, x6, x7;
  double *M0;
  register double *Mki0, *Mki1, *Mki2, *Mki3, *Mki4, *Mki5, *Mki6, *Mki7;
  register int firstcol = 0;

  M0 = &M[0];

  while (firstcol < ncol - 7) { /* Do 8 columns */
   /* each Mki(j) starts at the diagonal entry of column firstcol+j */
   Mki0 = M0;
   Mki1 = Mki0 + ldm + 1;
   Mki2 = Mki1 + ldm + 1;
   Mki3 = Mki2 + ldm + 1;
   Mki4 = Mki3 + ldm + 1;
   Mki5 = Mki4 + ldm + 1;
   Mki6 = Mki5 + ldm + 1;
   Mki7 = Mki6 + ldm + 1;

   /* forward substitution within the 8x8 diagonal block */
   x0 = rhs[firstcol] / *Mki0++;
   x1 = (rhs[firstcol + 1] - x0 * *Mki0++) / *Mki1++;
   x2 = (rhs[firstcol + 2] - x0 * *Mki0++ - x1 * *Mki1++) / *Mki2++;
   x3 = (rhs[firstcol + 3] - x0 * *Mki0++ - x1 * *Mki1++ - x2 * *Mki2++) / *Mki3++;
   x4 = (rhs[firstcol + 4] - x0 * *Mki0++ - x1 * *Mki1++ - x2 * *Mki2++ - x3 * *Mki3++) / *Mki4++;
   x5 = (rhs[firstcol + 5] - x0 * *Mki0++ - x1 * *Mki1++ - x2 * *Mki2++ - x3 * *Mki3++ - x4 * *Mki4++) / *Mki5++;
   x6 = (rhs[firstcol + 6] - x0 * *Mki0++ - x1 * *Mki1++ - x2 * *Mki2++ - x3 * *Mki3++ - x4 * *Mki4++ -
         x5 * *Mki5++) / *Mki6++;
   x7 = (rhs[firstcol + 7] - x0 * *Mki0++ - x1 * *Mki1++ - x2 * *Mki2++ - x3 * *Mki3++ - x4 * *Mki4++ -
         x5 * *Mki5++ - x6 * *Mki6++) / *Mki7++;

   rhs[firstcol++] = x0;
   rhs[firstcol++] = x1;
   rhs[firstcol++] = x2;
   rhs[firstcol++] = x3;
   rhs[firstcol++] = x4;
   rhs[firstcol++] = x5;
   rhs[firstcol++] = x6;
   rhs[firstcol++] = x7;

   /* update the trailing part of rhs with the 8 solved unknowns */
   for (k = firstcol; k < ncol; k++)
    rhs[k] = rhs[k] - x0 * *Mki0++ - x1 * *Mki1++
             - x2 * *Mki2++ - x3 * *Mki3++
             - x4 * *Mki4++ - x5 * *Mki5++
             - x6 * *Mki6++ - x7 * *Mki7++;

   M0 += 8 * ldm + 8; /* advance to the next 8x8 diagonal block */
  }

  while (firstcol < ncol - 3) { /* Do 4 columns */
   Mki0 = M0;
   Mki1 = Mki0 + ldm + 1;
   Mki2 = Mki1 + ldm + 1;
   Mki3 = Mki2 + ldm + 1;

   x0 = rhs[firstcol] / *Mki0++;
   x1 = (rhs[firstcol + 1] - x0 * *Mki0++) / *Mki1++;
   x2 = (rhs[firstcol + 2] - x0 * *Mki0++ - x1 * *Mki1++) / *Mki2++;
   x3 = (rhs[firstcol + 3] - x0 * *Mki0++ - x1 * *Mki1++ - x2 * *Mki2++) / *Mki3++;

   rhs[firstcol++] = x0;
   rhs[firstcol++] = x1;
   rhs[firstcol++] = x2;
   rhs[firstcol++] = x3;

   for (k = firstcol; k < ncol; k++)
    rhs[k] = rhs[k] - x0 * *Mki0++ - x1 * *Mki1++
             - x2 * *Mki2++ - x3 * *Mki3++;

   M0 += 4 * ldm + 4;
  }

  /* at this point at most 3 columns remain */
  if (firstcol < ncol - 1) { /* Do 2 columns */
   Mki0 = M0;
   Mki1 = Mki0 + ldm + 1;

   x0 = rhs[firstcol] / *Mki0++;
   x1 = (rhs[firstcol + 1] - x0 * *Mki0++) / *Mki1++;

   rhs[firstcol++] = x0;
   rhs[firstcol++] = x1;

   for (k = firstcol; k < ncol; k++)
    rhs[k] = rhs[k] - x0 * *Mki0++ - x1 * *Mki1++;

   M0 += 2 * ldm + 2;
  }

  if (firstcol == ncol - 1) { /* Do 1 columns */
   Mki0 = M0;
   x0 = rhs[firstcol] / *Mki0;
   rhs[firstcol] = x0;
  }
 }

/* Straightforward (unblocked) in-place lower-triangular solve on the leading
 * col x col block of M (row-of-column layout M[i * colSize + j]).
 * The commented-out pragmas suggest a past/planned synchronized variant. */
 void lSolve_dense_col_sync(int colSize, int col, double *M, double *rhs) {
//#pragma omp critical
  for (int i = 0; i < col; ++i) {
//#pragma omp atomic
   rhs[i] /= M[i * colSize + i];
   for (int j = i + 1; j < col; ++j) {
    double tmp = M[i * colSize + j] * rhs[i];
//#pragma omp atomic
    rhs[j] -= tmp;
   }
  }
  //return 1;
 }

/* Mxvec += M * vec for a dense column-major nrow x ncol matrix, unrolled over
 * 8 / 4 / 1 columns with post-incremented column pointers. */
 void dmatvec_blas(
   int ldm, /* in -- leading dimension of M */
   int nrow, /* in */
   int ncol, /* in */
   double *M, /* in */
   double *vec, /* in */
   double *Mxvec /* in/out */
 ) {
  double vi0, vi1, vi2, vi3, vi4, vi5, vi6, vi7;
  double *M0;
  register double *Mki0, *Mki1, *Mki2, *Mki3, *Mki4, *Mki5, *Mki6, *Mki7;
  register int firstcol = 0;
  int k;

  M0 = &M[0];

  while (firstcol < ncol - 7) { /* Do 8 columns */

   Mki0 = M0;
   Mki1 = Mki0 + ldm;
   Mki2 = Mki1 + ldm;
   Mki3 = Mki2 + ldm;
   Mki4 = Mki3 + ldm;
   Mki5 = Mki4 + ldm;
   Mki6 = Mki5 + ldm;
   Mki7 = Mki6 + ldm;

   vi0 = vec[firstcol++];
   vi1 = vec[firstcol++];
   vi2 = vec[firstcol++];
   vi3 = vec[firstcol++];
   vi4 = vec[firstcol++];
   vi5 = vec[firstcol++];
   vi6 = vec[firstcol++];
   vi7 = vec[firstcol++];

   for (k = 0; k < nrow; k++)
    Mxvec[k] += vi0 * *Mki0++ + vi1 * *Mki1++
                + vi2 * *Mki2++ + vi3 * *Mki3++
                + vi4 * *Mki4++ + vi5 * *Mki5++
                + vi6 * *Mki6++ + vi7 * *Mki7++;

   M0 += 8 * ldm;
  }

  while (firstcol < ncol - 3) { /* Do 4 columns */

   Mki0 = M0;
   Mki1 = Mki0 + ldm;
   Mki2 = Mki1 + ldm;
   Mki3 = Mki2 + ldm;

   vi0 = vec[firstcol++];
   vi1 = vec[firstcol++];
   vi2 = vec[firstcol++];
   vi3 = vec[firstcol++];
   for (k = 0; k < nrow; k++)
    Mxvec[k] += vi0 * *Mki0++ + vi1 * *Mki1++
                + vi2 * *Mki2++ + vi3 * *Mki3++;

   M0 += 4 * ldm;
  }

  while (firstcol < ncol) { /* Do 1 column */

   Mki0 = M0;
   vi0 = vec[firstcol++];
   for (k = 0; k < nrow; k++)
    Mxvec[k] += vi0 * *Mki0++;

   M0 += ldm;
  }
 }
}
#endif //TRIANGOPENMP_BLAS_H
helper.h
#ifndef HELPER_H
#define HELPER_H

// NOTE(review): relies on config.h (or its transitive includes) to provide
// int16_t, memset and MAXLOGN — confirm if this header is used standalone.
#include "config.h"

#define MIN(x, y) ((x) < (y) ? (x) : (y))
#define MAX(x, y) ((x) > (y) ? (x) : (y))

// floor(log2(x)) for x > 0 (undefined for x == 0: __builtin_clz(0) is UB)
static inline int log2int(int x) { return 31 - __builtin_clz(x); }

// Union-find-backed structure over indices 0..n. Each root of the union-find
// carries a value; set(x, val) merges every predecessor run whose value does
// not exceed val into x's set, so get(y) afterwards reports val for those y.
// All storage lives in caller-provided memory (init's mem_base).
struct ISMQ {
    int16_t *value;            // per-root value
    int16_t *parent, *left;    // union-find parent; leftmost index of each set

    // find with path compression
    int findp(int x) { return parent[x] == x ? x : (parent[x] = findp(parent[x])); }

    // merge the sets of l and r (r becomes root, inheriting l's left edge);
    // returns the leftmost index of the merged set
    inline int joint(int l, int r) {
        l = findp(l); r = findp(r);
        parent[l] = r, left[r] = left[l];
        return left[r];
    }

    // carve parent/left/value out of mem_base (needs 3*(n+1) int16_t) and
    // make every index its own singleton set with value 0
    void init(int n, int16_t *mem_base) {
        parent = mem_base;
        left = mem_base+n+1;
        value = mem_base+(n+1)*2;
        int16_t *tmp;
        tmp = parent;
        for (int i = 0; i <= n; i++)
            *tmp = i, tmp++;
        tmp = left;
        for (int i = 0; i <= n; i++)
            *tmp = i, tmp++;
        memset(value, 0, sizeof(int16_t)*(n+1));
    }

    // value associated with x's set
    int get(int x) { return value[findp(x)]; }

    // assign val to x, then absorb preceding sets while their value <= val
    void set(int x, int val) {
        value[x] = val;
        int y = x-1;
        while (y >= 0) {
            y = findp(y);
            if (value[y] > val)
                return ;
            y = joint(y, x);
            y--;    // continue with the set just left of the merged run
        }
    }
};

// Sparse table for range-maximum queries over int16_t values, with an
// OpenMP-parallel build (parallel_build must run inside a parallel region
// since it uses a bare `omp for`). Storage is caller-provided.
struct SparseTable {
    int16_t *tb[MAXLOGN]; // tb[k][i] = max of the 2^k values ending at i

    // create tb[0..logN][0..n]
    inline void init(int n, int logN, int16_t *mem_base) {
        for (int i = 0; i <= logN; i++) {
            tb[i] = mem_base + i*(n+1);
            tb[i][0] = 0;
        }
    }

    // parallel build
    inline void parallel_build(int n, int logN) {
        for (int k = 1; k <= logN; k++) {
            int16_t *tbu = tb[k];
            const int16_t *tbv = tb[k-1];
#pragma omp for schedule(static)
            for (int i = 1; i <= n; i++) {
                if (i-(1<<(k-1)) >= 0) {
                    int16_t p = tbv[i-(1<<(k-1))];
                    int16_t q = tbv[i];
                    tbu[i] = MAX(q, p);
                }
            }
        }
    }

    // set tb[0][x] = val, and update its relationship
    // (refreshes levels 1..limG whose windows end at x)
    inline void set(int x, int16_t val, int limG) {
        tb[0][x] = val;
        for (int i = 1; i <= limG && (1<<(i-1)) <= x; i++) {
            int16_t p = tb[i-1][x-(1<<(i-1))];
            int16_t q = tb[i-1][x];
            tb[i][x] = MAX(q, p);
        }
    }

    // query the maximum value of interval [l..r]
    inline int16_t get(int l, int r) {
        int t = log2int(r-l+1);
        int16_t p = tb[t][l+(1<<t)-1];
        int16_t q = tb[t][r];
        return MAX(q, p);
    }

    // same query with the level t precomputed by the caller
    inline int16_t get(int l, int r, int t) {
        int16_t p = tb[t][l+(1<<t)-1];
        int16_t q = tb[t][r];
        return MAX(q, p);
    }
};

#undef MIN
#undef MAX
#endif
jacobi_omp.c
/* * Copyright (c) 2008, BSC (Barcelon Supercomputing Center) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the <organization> nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY BSC ''AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <math.h> #include <time.h> #define NB 32 #define B 256 #define FALSE (0) #define TRUE (1) typedef double fp_type; typedef fp_type *vin; typedef fp_type *vout; typedef fp_type *bin; typedef fp_type *binout; fp_type *A[NB][NB]; fp_type *A_new[NB][NB]; fp_type *tmp[NB][NB]; void alloc_and_genmat() { int init_val, i, j, ii, jj; fp_type *p, *p_new; init_val = 1325; for (ii = 0; ii < NB; ii++) { for (jj = 0; jj < NB; jj++) { A[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type)); A_new[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type)); tmp[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type)); if (A[ii][jj] == NULL || A_new[ii][jj] == NULL || tmp[ii][jj] == NULL) { printf("Out of memory\n"); exit(1); } p = A[ii][jj]; p_new = A_new[ii][jj]; for (i = 0; i < B; i++) { for (j = 0; j < B; j++) { init_val = (3125 * init_val) % 65536; (*p) = (fp_type)((init_val - 32768.0) / 16384.0); (*p_new) = (*p); p++; p_new++; } } } } } long usecs(void) { struct timeval t; gettimeofday(&t, NULL); return t.tv_sec * 1000000 + t.tv_usec; } void clear(vout v) { int i, j, k; for (i = 0; i < B; i++) v[i] = (fp_type)0.0; } void getlastrow(bin A, vout v) { int j; for (j = 0; j < B; j++) v[j] = A[(B - 1) * B + j]; } void getlastcol(bin A, vout v) { int i; for (i = 0; i < B; i++) v[i] = A[i * B + B - 1]; } void getfirstrow(bin A, vout v) { int j; for (j = 0; j < B; j++) v[j] = A[0 * B + j]; } void getfirstcol(bin A, vout v) { int i; for (i = 0; i < B; i++) v[i] = A[i * B + 0]; } void jacobi(vin lefthalo, vin tophalo, vin righthalo, vin bottomhalo, bin A, binout A_new) { int i, j; fp_type tmp; fp_type left, top, right, bottom; for (i = 0; (i < B); i++) { for (j = 0; j < B; j++) { tmp = A[i * B + j]; left = (j == 0 ? lefthalo[j] : A[i * B + j - 1]); top = (i == 0 ? tophalo[i] : A[(i - 1) * B + j]); right = (j == B - 1 ? righthalo[i] : A[i * B + j + 1]); bottom = (i == B - 1 ? 
bottomhalo[i] : A[(i + 1) * B + j]); A_new[i * B + j] = 0.2 * (A[i * B + j] + left + top + right + bottom); } } } double maxdelta() { double dmax = -__DBL_MAX__; int ii, jj, i, j; #pragma omp parallel for schedule(static) reduction(max: dmax) for (ii = 0; ii < NB; ii++) { for (jj = 0; jj < NB; jj++) { for (i = 0; (i < B); i++) { for (j = 0; j < B; j++) { double diff = fabs(A_new[ii][jj][i * B + j] - A[ii][jj][i * B + j]); if(diff > dmax) dmax = diff; } } } } return dmax; } void compute(int niters) { int iters; int ii, jj; fp_type lefthalo[B], tophalo[B], righthalo[B], bottomhalo[B]; double delta = 2.0; double epsilon = 1e-7; iters = 0; // for (iters = 0; iters < niters; iters++) while(iters < niters) { ++iters; #pragma omp parallel \ private(ii, jj, lefthalo, tophalo, righthalo, bottomhalo) \ shared(A, A_new) { #pragma omp for schedule(static) for (ii = 0; ii < NB; ii++) { for (jj = 0; jj < NB; jj++) { if (ii > 0) getlastrow(A[ii - 1][jj], tophalo); else clear(tophalo); if (jj > 0) getlastcol(A[ii][jj - 1], lefthalo); else clear(lefthalo); if (ii < NB - 1) getfirstrow(A[ii + 1][jj], bottomhalo); else clear(bottomhalo); if (jj < NB - 1) getfirstcol(A[ii][jj + 1], righthalo); else clear(lefthalo); jacobi(lefthalo, tophalo, righthalo, bottomhalo, A[ii][jj], A_new[ii][jj]); } // jj } // ii } // end parallel delta = maxdelta(); printf("iteration %d: delta = %e\n", iters, delta); // yes, this is an inefficient copy // however, the library version requires you to do a copy in this way // on all of the component parts to avoid segmentation fault #pragma omp parallel for schedule(static) shared(A, A_new) for(int i = 0; i < NB; ++i) { for(int j = 0; j < NB; ++j) { for(int k = 0; k < B; ++k) for(int l = 0; l < B; ++l) A[i][j][k * B + l] = A_new[i][j][k * B + l]; } } } // iter } int main(int argc, char *argv[]) { int niters; // pp_time_t tm; // memset( &tm, 0, sizeof(tm) ); struct timespec start, end; if (argc > 1) { niters = atoi(argv[1]); } else niters = 1; 
alloc_and_genmat(); clock_gettime(CLOCK_MONOTONIC, &start); compute(niters); clock_gettime(CLOCK_MONOTONIC, &end); double time_taken = (end.tv_sec - start.tv_sec) * 1e9; time_taken = (time_taken + (end.tv_nsec - start.tv_nsec)) * 1e-9; printf("Running time = %g %s\n", time_taken, "s"); /* FILE *outFile; outFile = fopen("./jacobi_omp_values.txt", "w"); if (outFile == NULL) { fprintf(stderr, "Error writing to file\n"); } else { int ii, jj, i, j; for (ii = 0; ii < NB; ++ii) for (jj = 0; jj < NB; ++jj) for (i = 0; i < B; ++i) for (j = 0; j < B; ++j) fprintf(outFile, "%.15f\n", A[ii][jj][i * B + j]); fclose(outFile); } */ return 0; }
/* ==== implicit_blender.c ==== */
/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * The Original Code is Copyright (C) Blender Foundation * All rights reserved. */ /** \file * \ingroup bph */ #include "implicit.h" #ifdef IMPLICIT_SOLVER_BLENDER # include "MEM_guardedalloc.h" # include "DNA_scene_types.h" # include "DNA_object_types.h" # include "DNA_object_force_types.h" # include "DNA_meshdata_types.h" # include "DNA_texture_types.h" # include "BLI_math.h" # include "BLI_utildefines.h" # include "BKE_cloth.h" # include "BKE_collision.h" # include "BKE_effect.h" # include "BPH_mass_spring.h" # ifdef __GNUC__ # pragma GCC diagnostic ignored "-Wtype-limits" # endif # ifdef _OPENMP # define CLOTH_OPENMP_LIMIT 512 # endif //#define DEBUG_TIME # ifdef DEBUG_TIME # include "PIL_time.h" # endif static float I[3][3] = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}}; static float ZERO[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}}; # if 0 # define C99 # ifdef C99 # defineDO_INLINE inline # else # defineDO_INLINE static # endif # endif /* if 0 */ struct Cloth; ////////////////////////////////////////// /* fast vector / matrix library, enhancements are welcome :) -dg */ ///////////////////////////////////////// /* DEFINITIONS */ typedef float lfVector[3]; typedef struct fmatrix3x3 { float m[3][3]; /* 3x3 matrix */ unsigned int c, r; /* column and row number */ /* int pinned; // 
is this vertex allowed to move? */
  float n1, n2, n3;    /* three normal vectors for collision constraints */
  unsigned int vcount; /* vertex count */
  unsigned int scount; /* spring count */
} fmatrix3x3;

///////////////////////////
// float[3] vector
///////////////////////////
/* simple vector code */
/* STATUS: verified */

/* to = from * scalar (component-wise). */
DO_INLINE void mul_fvector_S(float to[3], float from[3], float scalar)
{
  to[0] = from[0] * scalar;
  to[1] = from[1] * scalar;
  to[2] = from[2] * scalar;
}

/* Outer product: to = vectorA * vectorB^T (row i is vectorB scaled by vectorA[i]). */
/* STATUS: HAS TO BE verified (*should* work) */
DO_INLINE void mul_fvectorT_fvector(float to[3][3], float vectorA[3], float vectorB[3])
{
  mul_fvector_S(to[0], vectorB, vectorA[0]);
  mul_fvector_S(to[1], vectorB, vectorA[1]);
  mul_fvector_S(to[2], vectorB, vectorA[2]);
}

/* Scaled outer product: to = (vectorA * vectorB^T) * aS. */
/* STATUS: HAS TO BE verified (*should* work) */
DO_INLINE void mul_fvectorT_fvectorS(float to[3][3], float vectorA[3], float vectorB[3], float aS)
{
  mul_fvectorT_fvector(to, vectorA, vectorB);
  /* scale each row by aS */
  mul_fvector_S(to[0], to[0], aS);
  mul_fvector_S(to[1], to[1], aS);
  mul_fvector_S(to[2], to[2], aS);
}

# if 0
/* printf vector[3] on console: for debug output */
static void print_fvector(float m3[3])
{
  printf("%f\n%f\n%f\n\n", m3[0], m3[1], m3[2]);
}

///////////////////////////
// long float vector float (*)[3]
///////////////////////////
/* print long vector on console: for debug output */
DO_INLINE void print_lfvector(float (*fLongVector)[3], unsigned int verts)
{
  unsigned int i = 0;
  for (i = 0; i < verts; i++) {
    print_fvector(fLongVector[i]);
  }
}
# endif

/* Allocate a zero-initialized long vector of `verts` float[3] entries. */
DO_INLINE lfVector *create_lfvector(unsigned int verts)
{
  /* TODO: check if memory allocation was successful */
  return (lfVector *)MEM_callocN(verts * sizeof(lfVector), "cloth_implicit_alloc_vector");
  // return (lfVector *)cloth_aligned_malloc(&MEMORY_BASE, verts * sizeof(lfVector));
}

/* Free a long vector (NULL-safe). */
DO_INLINE void del_lfvector(float (*fLongVector)[3])
{
  if (fLongVector != NULL) {
    MEM_freeN(fLongVector);
    // cloth_aligned_free(&MEMORY_BASE, fLongVector);
  }
}

/* copy long vector: to = from */
DO_INLINE void cp_lfvector(float (*to)[3], float (*from)[3], unsigned int verts)
{
  memcpy(to, from, verts * sizeof(lfVector));
}

/* init long vector: every entry set to the given float[3] */
DO_INLINE void init_lfvector(float (*fLongVector)[3], float vector[3], unsigned int verts)
{
  unsigned int i = 0;
  for (i = 0; i < verts; i++) {
    copy_v3_v3(fLongVector[i], vector);
  }
}

/* zero long vector (all-zero float bit pattern via memset) */
DO_INLINE void zero_lfvector(float (*to)[3], unsigned int verts)
{
  memset(to, 0.0f, verts * sizeof(lfVector));
}

/* multiply long vector with scalar: to = fLongVector * scalar */
DO_INLINE void mul_lfvectorS(float (*to)[3],
                             float (*fLongVector)[3],
                             float scalar,
                             unsigned int verts)
{
  unsigned int i = 0;

  for (i = 0; i < verts; i++) {
    mul_fvector_S(to[i], fLongVector[i], scalar);
  }
}

/* multiply long vector with scalar and subtract */
/* A -= B * float */
DO_INLINE void submul_lfvectorS(float (*to)[3],
                                float (*fLongVector)[3],
                                float scalar,
                                unsigned int verts)
{
  unsigned int i = 0;
  for (i = 0; i < verts; i++) {
    VECSUBMUL(to[i], fLongVector[i], scalar);
  }
}

/* dot product for big vector: sum of per-entry v3 dot products */
DO_INLINE float dot_lfvector(float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts)
{
  long i = 0;
  float temp = 0.0;
  // XXX brecht, disabled this for now (first schedule line was already disabled),
  // due to non-commutative nature of floating point ops this makes the sim give
  // different results each time you run it!
// schedule(guided, 2) //#pragma omp parallel for reduction(+: temp) if (verts > CLOTH_OPENMP_LIMIT) for (i = 0; i < (long)verts; i++) { temp += dot_v3v3(fLongVectorA[i], fLongVectorB[i]); } return temp; } /* A = B + C --> for big vector */ DO_INLINE void add_lfvector_lfvector(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { add_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]); } } /* A = B + C * float --> for big vector */ DO_INLINE void add_lfvector_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECADDS(to[i], fLongVectorA[i], fLongVectorB[i], bS); } } /* A = B * float + C * float --> for big vector */ DO_INLINE void add_lfvectorS_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float aS, float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECADDSS(to[i], fLongVectorA[i], aS, fLongVectorB[i], bS); } } /* A = B - C * float --> for big vector */ DO_INLINE void sub_lfvector_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECSUBS(to[i], fLongVectorA[i], fLongVectorB[i], bS); } } /* A = B - C --> for big vector */ DO_INLINE void sub_lfvector_lfvector(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { sub_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]); } } /////////////////////////// // 3x3 matrix /////////////////////////// # if 0 /* printf 3x3 matrix on console: for debug output */ static void print_fmatrix(float m3[3][3]) { printf("%f\t%f\t%f\n", m3[0][0], m3[0][1], m3[0][2]); printf("%f\t%f\t%f\n", m3[1][0], m3[1][1], m3[1][2]); printf("%f\t%f\t%f\n\n", m3[2][0], m3[2][1], m3[2][2]); } static void 
print_sparse_matrix(fmatrix3x3 *m) { if (m) { unsigned int i; for (i = 0; i < m[0].vcount + m[0].scount; i++) { printf("%d:\n", i); print_fmatrix(m[i].m); } } } # endif # if 0 static void print_lvector(lfVector *v, int numverts) { int i; for (i = 0; i < numverts; i++) { if (i > 0) { printf("\n"); } printf("%f,\n", v[i][0]); printf("%f,\n", v[i][1]); printf("%f,\n", v[i][2]); } } # endif # if 0 static void print_bfmatrix(fmatrix3x3 *m) { int tot = m[0].vcount + m[0].scount; int size = m[0].vcount * 3; float *t = MEM_callocN(sizeof(float) * size * size, "bfmatrix"); int q, i, j; for (q = 0; q < tot; q++) { int k = 3 * m[q].r; int l = 3 * m[q].c; for (j = 0; j < 3; j++) { for (i = 0; i < 3; i++) { // if (t[k + i + (l + j) * size] != 0.0f) { // printf("warning: overwriting value at %d, %d\n", m[q].r, m[q].c); // } if (k == l) { t[k + i + (k + j) * size] += m[q].m[i][j]; } else { t[k + i + (l + j) * size] += m[q].m[i][j]; t[l + j + (k + i) * size] += m[q].m[j][i]; } } } } for (j = 0; j < size; j++) { if (j > 0 && j % 3 == 0) { printf("\n"); } for (i = 0; i < size; i++) { if (i > 0 && i % 3 == 0) { printf(" "); } implicit_print_matrix_elem(t[i + j * size]); } printf("\n"); } MEM_freeN(t); } # endif /* copy 3x3 matrix */ DO_INLINE void cp_fmatrix(float to[3][3], float from[3][3]) { // memcpy(to, from, sizeof (float) * 9); copy_v3_v3(to[0], from[0]); copy_v3_v3(to[1], from[1]); copy_v3_v3(to[2], from[2]); } /* copy 3x3 matrix */ DO_INLINE void initdiag_fmatrixS(float to[3][3], float aS) { cp_fmatrix(to, ZERO); to[0][0] = aS; to[1][1] = aS; to[2][2] = aS; } # if 0 /* calculate determinant of 3x3 matrix */ DO_INLINE float det_fmatrix(float m[3][3]) { return m[0][0] * m[1][1] * m[2][2] + m[1][0] * m[2][1] * m[0][2] + m[0][1] * m[1][2] * m[2][0] - m[0][0] * m[1][2] * m[2][1] - m[0][1] * m[1][0] * m[2][2] - m[2][0] * m[1][1] * m[0][2]; } DO_INLINE void inverse_fmatrix(float to[3][3], float from[3][3]) { unsigned int i, j; float d; if ((d = det_fmatrix(from)) == 0) { 
printf("can't build inverse"); exit(0); } for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { int i1 = (i + 1) % 3; int i2 = (i + 2) % 3; int j1 = (j + 1) % 3; int j2 = (j + 2) % 3; /** Reverse indexes i&j to take transpose. */ to[j][i] = (from[i1][j1] * from[i2][j2] - from[i1][j2] * from[i2][j1]) / d; /** * <pre> * if (i == j) { * to[i][j] = 1.0f / from[i][j]; * } * else { * to[i][j] = 0; * } * </pre> */ } } } # endif /* 3x3 matrix multiplied by a scalar */ /* STATUS: verified */ DO_INLINE void mul_fmatrix_S(float matrix[3][3], float scalar) { mul_fvector_S(matrix[0], matrix[0], scalar); mul_fvector_S(matrix[1], matrix[1], scalar); mul_fvector_S(matrix[2], matrix[2], scalar); } /* a vector multiplied by a 3x3 matrix */ /* STATUS: verified */ DO_INLINE void mul_fvector_fmatrix(float *to, float *from, float matrix[3][3]) { to[0] = matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2]; to[1] = matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2]; to[2] = matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2]; } /* 3x3 matrix multiplied by a vector */ /* STATUS: verified */ DO_INLINE void mul_fmatrix_fvector(float *to, float matrix[3][3], float from[3]) { to[0] = dot_v3v3(matrix[0], from); to[1] = dot_v3v3(matrix[1], from); to[2] = dot_v3v3(matrix[2], from); } /* 3x3 matrix addition with 3x3 matrix */ DO_INLINE void add_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3]) { add_v3_v3v3(to[0], matrixA[0], matrixB[0]); add_v3_v3v3(to[1], matrixA[1], matrixB[1]); add_v3_v3v3(to[2], matrixA[2], matrixB[2]); } /* A -= B*x + C*y (3x3 matrix sub-addition with 3x3 matrix) */ DO_INLINE void subadd_fmatrixS_fmatrixS( float to[3][3], float matrixA[3][3], float aS, float matrixB[3][3], float bS) { VECSUBADDSS(to[0], matrixA[0], aS, matrixB[0], bS); VECSUBADDSS(to[1], matrixA[1], aS, matrixB[1], bS); VECSUBADDSS(to[2], matrixA[2], aS, matrixB[2], bS); } /* A = B - C (3x3 matrix subtraction with 3x3 
matrix) */ DO_INLINE void sub_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3]) { sub_v3_v3v3(to[0], matrixA[0], matrixB[0]); sub_v3_v3v3(to[1], matrixA[1], matrixB[1]); sub_v3_v3v3(to[2], matrixA[2], matrixB[2]); } ///////////////////////////////////////////////////////////////// // special functions ///////////////////////////////////////////////////////////////// /* 3x3 matrix multiplied+added by a vector */ /* STATUS: verified */ DO_INLINE void muladd_fmatrix_fvector(float to[3], float matrix[3][3], float from[3]) { to[0] += dot_v3v3(matrix[0], from); to[1] += dot_v3v3(matrix[1], from); to[2] += dot_v3v3(matrix[2], from); } DO_INLINE void muladd_fmatrixT_fvector(float to[3], float matrix[3][3], float from[3]) { to[0] += matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2]; to[1] += matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2]; to[2] += matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2]; } BLI_INLINE void outerproduct(float r[3][3], const float a[3], const float b[3]) { mul_v3_v3fl(r[0], a, b[0]); mul_v3_v3fl(r[1], a, b[1]); mul_v3_v3fl(r[2], a, b[2]); } BLI_INLINE void cross_m3_v3m3(float r[3][3], const float v[3], float m[3][3]) { cross_v3_v3v3(r[0], v, m[0]); cross_v3_v3v3(r[1], v, m[1]); cross_v3_v3v3(r[2], v, m[2]); } BLI_INLINE void cross_v3_identity(float r[3][3], const float v[3]) { r[0][0] = 0.0f; r[1][0] = v[2]; r[2][0] = -v[1]; r[0][1] = -v[2]; r[1][1] = 0.0f; r[2][1] = v[0]; r[0][2] = v[1]; r[1][2] = -v[0]; r[2][2] = 0.0f; } BLI_INLINE void madd_m3_m3fl(float r[3][3], float m[3][3], float f) { r[0][0] += m[0][0] * f; r[0][1] += m[0][1] * f; r[0][2] += m[0][2] * f; r[1][0] += m[1][0] * f; r[1][1] += m[1][1] * f; r[1][2] += m[1][2] * f; r[2][0] += m[2][0] * f; r[2][1] += m[2][1] * f; r[2][2] += m[2][2] * f; } ///////////////////////////////////////////////////////////////// /////////////////////////// // SPARSE SYMMETRIC big matrix with 3x3 matrix entries 
/////////////////////////// /* printf a big matrix on console: for debug output */ # if 0 static void print_bfmatrix(fmatrix3x3 *m3) { unsigned int i = 0; for (i = 0; i < m3[0].vcount + m3[0].scount; i++) { print_fmatrix(m3[i].m); } } # endif BLI_INLINE void init_fmatrix(fmatrix3x3 *matrix, int r, int c) { matrix->r = r; matrix->c = c; } /* create big matrix */ DO_INLINE fmatrix3x3 *create_bfmatrix(unsigned int verts, unsigned int springs) { // TODO: check if memory allocation was successful */ fmatrix3x3 *temp = (fmatrix3x3 *)MEM_callocN(sizeof(fmatrix3x3) * (verts + springs), "cloth_implicit_alloc_matrix"); int i; temp[0].vcount = verts; temp[0].scount = springs; /* vertex part of the matrix is diagonal blocks */ for (i = 0; i < verts; i++) { init_fmatrix(temp + i, i, i); } return temp; } /* delete big matrix */ DO_INLINE void del_bfmatrix(fmatrix3x3 *matrix) { if (matrix != NULL) { MEM_freeN(matrix); } } /* copy big matrix */ DO_INLINE void cp_bfmatrix(fmatrix3x3 *to, fmatrix3x3 *from) { // TODO bounds checking memcpy(to, from, sizeof(fmatrix3x3) * (from[0].vcount + from[0].scount)); } /* init big matrix */ // slow in parallel DO_INLINE void init_bfmatrix(fmatrix3x3 *matrix, float m3[3][3]) { unsigned int i; for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) { cp_fmatrix(matrix[i].m, m3); } } /* init the diagonal of big matrix */ // slow in parallel DO_INLINE void initdiag_bfmatrix(fmatrix3x3 *matrix, float m3[3][3]) { unsigned int i, j; float tmatrix[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}}; for (i = 0; i < matrix[0].vcount; i++) { cp_fmatrix(matrix[i].m, m3); } for (j = matrix[0].vcount; j < matrix[0].vcount + matrix[0].scount; j++) { cp_fmatrix(matrix[j].m, tmatrix); } } /* SPARSE SYMMETRIC multiply big matrix with long vector*/ /* STATUS: verified */ DO_INLINE void mul_bfmatrix_lfvector(float (*to)[3], fmatrix3x3 *from, lfVector *fLongVector) { unsigned int vcount = from[0].vcount; lfVector *temp = create_lfvector(vcount); zero_lfvector(to, vcount); 
# pragma omp parallel sections if (vcount > CLOTH_OPENMP_LIMIT) { # pragma omp section { for (unsigned int i = from[0].vcount; i < from[0].vcount + from[0].scount; i++) { /* This is the lower triangle of the sparse matrix, * therefore multiplication occurs with transposed submatrices. */ muladd_fmatrixT_fvector(to[from[i].c], from[i].m, fLongVector[from[i].r]); } } # pragma omp section { for (unsigned int i = 0; i < from[0].vcount + from[0].scount; i++) { muladd_fmatrix_fvector(temp[from[i].r], from[i].m, fLongVector[from[i].c]); } } } add_lfvector_lfvector(to, to, temp, from[0].vcount); del_lfvector(temp); } /* SPARSE SYMMETRIC sub big matrix with big matrix*/ /* A -= B * float + C * float --> for big matrix */ /* VERIFIED */ DO_INLINE void subadd_bfmatrixS_bfmatrixS( fmatrix3x3 *to, fmatrix3x3 *from, float aS, fmatrix3x3 *matrix, float bS) { unsigned int i = 0; /* process diagonal elements */ for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) { subadd_fmatrixS_fmatrixS(to[i].m, from[i].m, aS, matrix[i].m, bS); } } /////////////////////////////////////////////////////////////////// // simulator start /////////////////////////////////////////////////////////////////// typedef struct Implicit_Data { /* inputs */ fmatrix3x3 *bigI; /* identity (constant) */ fmatrix3x3 *tfm; /* local coordinate transform */ fmatrix3x3 *M; /* masses */ lfVector *F; /* forces */ fmatrix3x3 *dFdV, *dFdX; /* force jacobians */ int num_blocks; /* number of off-diagonal blocks (springs) */ /* motion state data */ lfVector *X, *Xnew; /* positions */ lfVector *V, *Vnew; /* velocities */ /* internal solver data */ lfVector *B; /* B for A*dV = B */ fmatrix3x3 *A; /* A for A*dV = B */ lfVector *dV; /* velocity change (solution of A*dV = B) */ lfVector *z; /* target velocity in constrained directions */ fmatrix3x3 *S; /* filtering matrix for constraints */ fmatrix3x3 *P, *Pinv; /* pre-conditioning matrix */ } Implicit_Data; Implicit_Data *BPH_mass_spring_solver_create(int numverts, int 
numsprings) { Implicit_Data *id = (Implicit_Data *)MEM_callocN(sizeof(Implicit_Data), "implicit vecmat"); /* process diagonal elements */ id->tfm = create_bfmatrix(numverts, 0); id->A = create_bfmatrix(numverts, numsprings); id->dFdV = create_bfmatrix(numverts, numsprings); id->dFdX = create_bfmatrix(numverts, numsprings); id->S = create_bfmatrix(numverts, 0); id->Pinv = create_bfmatrix(numverts, numsprings); id->P = create_bfmatrix(numverts, numsprings); id->bigI = create_bfmatrix(numverts, numsprings); // TODO 0 springs id->M = create_bfmatrix(numverts, numsprings); id->X = create_lfvector(numverts); id->Xnew = create_lfvector(numverts); id->V = create_lfvector(numverts); id->Vnew = create_lfvector(numverts); id->F = create_lfvector(numverts); id->B = create_lfvector(numverts); id->dV = create_lfvector(numverts); id->z = create_lfvector(numverts); initdiag_bfmatrix(id->bigI, I); return id; } void BPH_mass_spring_solver_free(Implicit_Data *id) { del_bfmatrix(id->tfm); del_bfmatrix(id->A); del_bfmatrix(id->dFdV); del_bfmatrix(id->dFdX); del_bfmatrix(id->S); del_bfmatrix(id->P); del_bfmatrix(id->Pinv); del_bfmatrix(id->bigI); del_bfmatrix(id->M); del_lfvector(id->X); del_lfvector(id->Xnew); del_lfvector(id->V); del_lfvector(id->Vnew); del_lfvector(id->F); del_lfvector(id->B); del_lfvector(id->dV); del_lfvector(id->z); MEM_freeN(id); } /* ==== Transformation from/to root reference frames ==== */ BLI_INLINE void world_to_root_v3(Implicit_Data *data, int index, float r[3], const float v[3]) { copy_v3_v3(r, v); mul_transposed_m3_v3(data->tfm[index].m, r); } BLI_INLINE void root_to_world_v3(Implicit_Data *data, int index, float r[3], const float v[3]) { mul_v3_m3v3(r, data->tfm[index].m, v); } BLI_INLINE void world_to_root_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3]) { float trot[3][3]; copy_m3_m3(trot, data->tfm[index].m); transpose_m3(trot); mul_m3_m3m3(r, trot, m); } BLI_INLINE void root_to_world_m3(Implicit_Data *data, int index, float r[3][3], 
float m[3][3]) { mul_m3_m3m3(r, data->tfm[index].m, m); } /* ================================ */ DO_INLINE void filter(lfVector *V, fmatrix3x3 *S) { unsigned int i = 0; for (i = 0; i < S[0].vcount; i++) { mul_m3_v3(S[i].m, V[S[i].r]); } } /* this version of the CG algorithm does not work very well with partial constraints * (where S has non-zero elements). */ # if 0 static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S) { // Solves for unknown X in equation AX=B unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100; float conjgrad_epsilon = 0.0001f /* , conjgrad_lasterror=0 */ /* UNUSED */; lfVector *q, *d, *tmp, *r; float s, starget, a, s_prev; unsigned int numverts = lA[0].vcount; q = create_lfvector(numverts); d = create_lfvector(numverts); tmp = create_lfvector(numverts); r = create_lfvector(numverts); // zero_lfvector(ldV, CLOTHPARTICLES); filter(ldV, S); add_lfvector_lfvector(ldV, ldV, z, numverts); // r = B - Mul(tmp, A, X); // just use B if X known to be zero cp_lfvector(r, lB, numverts); mul_bfmatrix_lfvector(tmp, lA, ldV); sub_lfvector_lfvector(r, r, tmp, numverts); filter(r, S); cp_lfvector(d, r, numverts); s = dot_lfvector(r, r, numverts); starget = s * sqrtf(conjgrad_epsilon); while (s > starget && conjgrad_loopcount < conjgrad_looplimit) { // Mul(q, A, d); // q = A*d; mul_bfmatrix_lfvector(q, lA, d); filter(q, S); a = s / dot_lfvector(d, q, numverts); // X = X + d*a; add_lfvector_lfvectorS(ldV, ldV, d, a, numverts); // r = r - q*a; sub_lfvector_lfvectorS(r, r, q, a, numverts); s_prev = s; s = dot_lfvector(r, r, numverts); //d = r+d*(s/s_prev); add_lfvector_lfvectorS(d, r, d, (s / s_prev), numverts); filter(d, S); conjgrad_loopcount++; } /* conjgrad_lasterror = s; */ /* UNUSED */ del_lfvector(q); del_lfvector(d); del_lfvector(tmp); del_lfvector(r); // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount); return conjgrad_loopcount < conjgrad_looplimit; // true means we reached desired accuracy in 
given time - ie stable } # endif static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, ImplicitSolverResult *result) { // Solves for unknown X in equation AX=B unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100; float conjgrad_epsilon = 0.01f; unsigned int numverts = lA[0].vcount; lfVector *fB = create_lfvector(numverts); lfVector *AdV = create_lfvector(numverts); lfVector *r = create_lfvector(numverts); lfVector *c = create_lfvector(numverts); lfVector *q = create_lfvector(numverts); lfVector *s = create_lfvector(numverts); float bnorm2, delta_new, delta_old, delta_target, alpha; cp_lfvector(ldV, z, numverts); /* d0 = filter(B)^T * P * filter(B) */ cp_lfvector(fB, lB, numverts); filter(fB, S); bnorm2 = dot_lfvector(fB, fB, numverts); delta_target = conjgrad_epsilon * conjgrad_epsilon * bnorm2; /* r = filter(B - A * dV) */ mul_bfmatrix_lfvector(AdV, lA, ldV); sub_lfvector_lfvector(r, lB, AdV, numverts); filter(r, S); /* c = filter(P^-1 * r) */ cp_lfvector(c, r, numverts); filter(c, S); /* delta = r^T * c */ delta_new = dot_lfvector(r, c, numverts); # ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT printf("==== A ====\n"); print_bfmatrix(lA); printf("==== z ====\n"); print_lvector(z, numverts); printf("==== B ====\n"); print_lvector(lB, numverts); printf("==== S ====\n"); print_bfmatrix(S); # endif while (delta_new > delta_target && conjgrad_loopcount < conjgrad_looplimit) { mul_bfmatrix_lfvector(q, lA, c); filter(q, S); alpha = delta_new / dot_lfvector(c, q, numverts); add_lfvector_lfvectorS(ldV, ldV, c, alpha, numverts); add_lfvector_lfvectorS(r, r, q, -alpha, numverts); /* s = P^-1 * r */ cp_lfvector(s, r, numverts); delta_old = delta_new; delta_new = dot_lfvector(r, s, numverts); add_lfvector_lfvectorS(c, s, c, delta_new / delta_old, numverts); filter(c, S); conjgrad_loopcount++; } # ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT printf("==== dV ====\n"); print_lvector(ldV, numverts); printf("========\n"); # endif 
del_lfvector(fB); del_lfvector(AdV); del_lfvector(r); del_lfvector(c); del_lfvector(q); del_lfvector(s); // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount); result->status = conjgrad_loopcount < conjgrad_looplimit ? BPH_SOLVER_SUCCESS : BPH_SOLVER_NO_CONVERGENCE; result->iterations = conjgrad_loopcount; result->error = bnorm2 > 0.0f ? sqrtf(delta_new / bnorm2) : 0.0f; return conjgrad_loopcount < conjgrad_looplimit; // true means we reached desired accuracy in given time - ie stable } # if 0 // block diagonalizer DO_INLINE void BuildPPinv(fmatrix3x3 *lA, fmatrix3x3 *P, fmatrix3x3 *Pinv) { unsigned int i = 0; // Take only the diagonal blocks of A // #pragma omp parallel for private(i) if (lA[0].vcount > CLOTH_OPENMP_LIMIT) for (i = 0; i < lA[0].vcount; i++) { // block diagonalizer cp_fmatrix(P[i].m, lA[i].m); inverse_fmatrix(Pinv[i].m, P[i].m); } } # if 0 // version 1.3 static int cg_filtered_pre(lfVector *dv, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, fmatrix3x3 *P, fmatrix3x3 *Pinv) { unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100; float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0; float conjgrad_epsilon = 0.0001; // 0.2 is dt for steps=5 lfVector *r = create_lfvector(numverts); lfVector *p = create_lfvector(numverts); lfVector *s = create_lfvector(numverts); lfVector *h = create_lfvector(numverts); BuildPPinv(lA, P, Pinv); filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); deltaNew = dot_lfvector(r, p, numverts); delta0 = deltaNew * sqrt(conjgrad_epsilon); # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif while ((deltaNew > delta0) && (iterations < conjgrad_looplimit)) { iterations++; mul_bfmatrix_lfvector(s, lA, p); filter(s, S); alpha = deltaNew / dot_lfvector(p, s, numverts); add_lfvector_lfvectorS(dv, dv, p, alpha, numverts); 
add_lfvector_lfvectorS(r, r, s, -alpha, numverts); mul_prevfmatrix_lfvector(h, Pinv, r); filter(h, S); deltaOld = deltaNew; deltaNew = dot_lfvector(r, h, numverts); add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts); filter(p, S); } # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered_pre time: %f\n", (float)(end - start)); # endif del_lfvector(h); del_lfvector(s); del_lfvector(p); del_lfvector(r); printf("iterations: %d\n", iterations); return iterations < conjgrad_looplimit; } # endif // version 1.4 static int cg_filtered_pre(lfVector *dv, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, fmatrix3x3 *P, fmatrix3x3 *Pinv, fmatrix3x3 *bigI) { unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100; float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0, tol = 0; lfVector *r = create_lfvector(numverts); lfVector *p = create_lfvector(numverts); lfVector *s = create_lfvector(numverts); lfVector *h = create_lfvector(numverts); lfVector *bhat = create_lfvector(numverts); lfVector *btemp = create_lfvector(numverts); BuildPPinv(lA, P, Pinv); initdiag_bfmatrix(bigI, I); sub_bfmatrix_Smatrix(bigI, bigI, S); // x = Sx_0+(I-S)z filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); // b_hat = S(b-A(I-S)z) mul_bfmatrix_lfvector(r, lA, z); mul_bfmatrix_lfvector(bhat, bigI, r); sub_lfvector_lfvector(bhat, lB, bhat, numverts); // r = S(b-Ax) mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); // p = SP^-1r mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); // delta0 = bhat^TP^-1bhat mul_prevfmatrix_lfvector(btemp, Pinv, bhat); delta0 = dot_lfvector(bhat, btemp, numverts); // deltaNew = r^TP deltaNew = dot_lfvector(r, p, numverts); # if 0 filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); deltaNew = dot_lfvector(r, p, 
numverts); delta0 = deltaNew * sqrt(conjgrad_epsilon); # endif # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif tol = (0.01 * 0.2); while ((deltaNew > delta0 * tol * tol) && (iterations < conjgrad_looplimit)) { iterations++; mul_bfmatrix_lfvector(s, lA, p); filter(s, S); alpha = deltaNew / dot_lfvector(p, s, numverts); add_lfvector_lfvectorS(dv, dv, p, alpha, numverts); add_lfvector_lfvectorS(r, r, s, -alpha, numverts); mul_prevfmatrix_lfvector(h, Pinv, r); filter(h, S); deltaOld = deltaNew; deltaNew = dot_lfvector(r, h, numverts); add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts); filter(p, S); } # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered_pre time: %f\n", (float)(end - start)); # endif del_lfvector(btemp); del_lfvector(bhat); del_lfvector(h); del_lfvector(s); del_lfvector(p); del_lfvector(r); // printf("iterations: %d\n", iterations); return iterations < conjgrad_looplimit; } # endif bool BPH_mass_spring_solve_velocities(Implicit_Data *data, float dt, ImplicitSolverResult *result) { unsigned int numverts = data->dFdV[0].vcount; lfVector *dFdXmV = create_lfvector(numverts); zero_lfvector(data->dV, numverts); cp_bfmatrix(data->A, data->M); subadd_bfmatrixS_bfmatrixS(data->A, data->dFdV, dt, data->dFdX, (dt * dt)); mul_bfmatrix_lfvector(dFdXmV, data->dFdX, data->V); add_lfvectorS_lfvectorS(data->B, data->F, dt, dFdXmV, (dt * dt), numverts); # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif /* Conjugate gradient algorithm to solve Ax=b. 
*/ cg_filtered(data->dV, data->A, data->B, data->z, data->S, result); // cg_filtered_pre(id->dV, id->A, id->B, id->z, id->S, id->P, id->Pinv, id->bigI); # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered calc time: %f\n", (float)(end - start)); # endif // advance velocities add_lfvector_lfvector(data->Vnew, data->V, data->dV, numverts); del_lfvector(dFdXmV); return result->status == BPH_SOLVER_SUCCESS; } bool BPH_mass_spring_solve_positions(Implicit_Data *data, float dt) { int numverts = data->M[0].vcount; // advance positions add_lfvector_lfvectorS(data->Xnew, data->X, data->Vnew, dt, numverts); return true; } void BPH_mass_spring_apply_result(Implicit_Data *data) { int numverts = data->M[0].vcount; cp_lfvector(data->X, data->Xnew, numverts); cp_lfvector(data->V, data->Vnew, numverts); } void BPH_mass_spring_set_vertex_mass(Implicit_Data *data, int index, float mass) { unit_m3(data->M[index].m); mul_m3_fl(data->M[index].m, mass); } void BPH_mass_spring_set_rest_transform(Implicit_Data *data, int index, float tfm[3][3]) { # ifdef CLOTH_ROOT_FRAME copy_m3_m3(data->tfm[index].m, tfm); # else unit_m3(data->tfm[index].m); (void)tfm; # endif } void BPH_mass_spring_set_motion_state(Implicit_Data *data, int index, const float x[3], const float v[3]) { world_to_root_v3(data, index, data->X[index], x); world_to_root_v3(data, index, data->V[index], v); } void BPH_mass_spring_set_position(Implicit_Data *data, int index, const float x[3]) { world_to_root_v3(data, index, data->X[index], x); } void BPH_mass_spring_set_velocity(Implicit_Data *data, int index, const float v[3]) { world_to_root_v3(data, index, data->V[index], v); } void BPH_mass_spring_get_motion_state(struct Implicit_Data *data, int index, float x[3], float v[3]) { if (x) { root_to_world_v3(data, index, x, data->X[index]); } if (v) { root_to_world_v3(data, index, v, data->V[index]); } } void BPH_mass_spring_get_position(struct Implicit_Data *data, int index, float x[3]) { 
root_to_world_v3(data, index, x, data->X[index]); } void BPH_mass_spring_get_new_position(struct Implicit_Data *data, int index, float x[3]) { root_to_world_v3(data, index, x, data->Xnew[index]); } void BPH_mass_spring_set_new_position(struct Implicit_Data *data, int index, const float x[3]) { world_to_root_v3(data, index, data->Xnew[index], x); } void BPH_mass_spring_get_new_velocity(struct Implicit_Data *data, int index, float v[3]) { root_to_world_v3(data, index, v, data->Vnew[index]); } void BPH_mass_spring_set_new_velocity(struct Implicit_Data *data, int index, const float v[3]) { world_to_root_v3(data, index, data->Vnew[index], v); } /* -------------------------------- */ static int BPH_mass_spring_add_block(Implicit_Data *data, int v1, int v2) { int s = data->M[0].vcount + data->num_blocks; /* index from array start */ BLI_assert(s < data->M[0].vcount + data->M[0].scount); ++data->num_blocks; /* tfm and S don't have spring entries (diagonal blocks only) */ init_fmatrix(data->bigI + s, v1, v2); init_fmatrix(data->M + s, v1, v2); init_fmatrix(data->dFdX + s, v1, v2); init_fmatrix(data->dFdV + s, v1, v2); init_fmatrix(data->A + s, v1, v2); init_fmatrix(data->P + s, v1, v2); init_fmatrix(data->Pinv + s, v1, v2); return s; } void BPH_mass_spring_clear_constraints(Implicit_Data *data) { int i, numverts = data->S[0].vcount; for (i = 0; i < numverts; i++) { unit_m3(data->S[i].m); zero_v3(data->z[i]); } } void BPH_mass_spring_add_constraint_ndof0(Implicit_Data *data, int index, const float dV[3]) { zero_m3(data->S[index].m); world_to_root_v3(data, index, data->z[index], dV); } void BPH_mass_spring_add_constraint_ndof1( Implicit_Data *data, int index, const float c1[3], const float c2[3], const float dV[3]) { float m[3][3], p[3], q[3], u[3], cmat[3][3]; world_to_root_v3(data, index, p, c1); mul_fvectorT_fvector(cmat, p, p); sub_m3_m3m3(m, I, cmat); world_to_root_v3(data, index, q, c2); mul_fvectorT_fvector(cmat, q, q); sub_m3_m3m3(m, m, cmat); /* XXX not sure but 
multiplication should work here */ copy_m3_m3(data->S[index].m, m); // mul_m3_m3m3(data->S[index].m, data->S[index].m, m); world_to_root_v3(data, index, u, dV); add_v3_v3(data->z[index], u); } void BPH_mass_spring_add_constraint_ndof2(Implicit_Data *data, int index, const float c1[3], const float dV[3]) { float m[3][3], p[3], u[3], cmat[3][3]; world_to_root_v3(data, index, p, c1); mul_fvectorT_fvector(cmat, p, p); sub_m3_m3m3(m, I, cmat); copy_m3_m3(data->S[index].m, m); // mul_m3_m3m3(data->S[index].m, data->S[index].m, m); world_to_root_v3(data, index, u, dV); add_v3_v3(data->z[index], u); } void BPH_mass_spring_clear_forces(Implicit_Data *data) { int numverts = data->M[0].vcount; zero_lfvector(data->F, numverts); init_bfmatrix(data->dFdX, ZERO); init_bfmatrix(data->dFdV, ZERO); data->num_blocks = 0; } void BPH_mass_spring_force_reference_frame(Implicit_Data *data, int index, const float acceleration[3], const float omega[3], const float domega_dt[3], float mass) { # ifdef CLOTH_ROOT_FRAME float acc[3], w[3], dwdt[3]; float f[3], dfdx[3][3], dfdv[3][3]; float euler[3], coriolis[3], centrifugal[3], rotvel[3]; float deuler[3][3], dcoriolis[3][3], dcentrifugal[3][3], drotvel[3][3]; world_to_root_v3(data, index, acc, acceleration); world_to_root_v3(data, index, w, omega); world_to_root_v3(data, index, dwdt, domega_dt); cross_v3_v3v3(euler, dwdt, data->X[index]); cross_v3_v3v3(coriolis, w, data->V[index]); mul_v3_fl(coriolis, 2.0f); cross_v3_v3v3(rotvel, w, data->X[index]); cross_v3_v3v3(centrifugal, w, rotvel); sub_v3_v3v3(f, acc, euler); sub_v3_v3(f, coriolis); sub_v3_v3(f, centrifugal); mul_v3_fl(f, mass); /* F = m * a */ cross_v3_identity(deuler, dwdt); cross_v3_identity(dcoriolis, w); mul_m3_fl(dcoriolis, 2.0f); cross_v3_identity(drotvel, w); cross_m3_v3m3(dcentrifugal, w, drotvel); add_m3_m3m3(dfdx, deuler, dcentrifugal); negate_m3(dfdx); mul_m3_fl(dfdx, mass); copy_m3_m3(dfdv, dcoriolis); negate_m3(dfdv); mul_m3_fl(dfdv, mass); add_v3_v3(data->F[index], f); 
add_m3_m3m3(data->dFdX[index].m, data->dFdX[index].m, dfdx); add_m3_m3m3(data->dFdV[index].m, data->dFdV[index].m, dfdv); # else (void)data; (void)index; (void)acceleration; (void)omega; (void)domega_dt; # endif } void BPH_mass_spring_force_gravity(Implicit_Data *data, int index, float mass, const float g[3]) { /* force = mass * acceleration (in this case: gravity) */ float f[3]; world_to_root_v3(data, index, f, g); mul_v3_fl(f, mass); add_v3_v3(data->F[index], f); } void BPH_mass_spring_force_drag(Implicit_Data *data, float drag) { int i, numverts = data->M[0].vcount; for (i = 0; i < numverts; i++) { float tmp[3][3]; /* NB: uses root space velocity, no need to transform */ madd_v3_v3fl(data->F[i], data->V[i], -drag); copy_m3_m3(tmp, I); mul_m3_fl(tmp, -drag); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tmp); } } void BPH_mass_spring_force_extern( struct Implicit_Data *data, int i, const float f[3], float dfdx[3][3], float dfdv[3][3]) { float tf[3], tdfdx[3][3], tdfdv[3][3]; world_to_root_v3(data, i, tf, f); world_to_root_m3(data, i, tdfdx, dfdx); world_to_root_m3(data, i, tdfdv, dfdv); add_v3_v3(data->F[i], tf); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, tdfdx); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tdfdv); } static float calc_nor_area_tri(float nor[3], const float v1[3], const float v2[3], const float v3[3]) { float n1[3], n2[3]; sub_v3_v3v3(n1, v1, v2); sub_v3_v3v3(n2, v2, v3); cross_v3_v3v3(nor, n1, n2); return normalize_v3(nor) / 2.0f; } /* XXX does not support force jacobians yet, since the effector system does not provide them either */ void BPH_mass_spring_force_face_wind( Implicit_Data *data, int v1, int v2, int v3, const float (*winvec)[3]) { const float effector_scale = 0.02f; float win[3], nor[3], area; float factor; /* calculate face normal and area */ area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]); /* The force is calculated and split up evenly for each of the three face verts */ factor = effector_scale * area / 
3.0f;

  /* Accumulate the wind force at each of the three face vertices: the wind
   * vector is brought into root space, then projected onto the face normal
   * so only the component blowing into the face pushes it. */
  world_to_root_v3(data, v1, win, winvec[v1]);
  madd_v3_v3fl(data->F[v1], nor, factor * dot_v3v3(win, nor));
  world_to_root_v3(data, v2, win, winvec[v2]);
  madd_v3_v3fl(data->F[v2], nor, factor * dot_v3v3(win, nor));
  world_to_root_v3(data, v3, win, winvec[v3]);
  madd_v3_v3fl(data->F[v3], nor, factor * dot_v3v3(win, nor));
}

/* Signed volume (times six) of the tetrahedron spanned by triangle
 * (v1, v2, v3) and the origin, using current root-space positions. */
float BPH_tri_tetra_volume_signed_6x(Implicit_Data *data, int v1, int v2, int v3)
{
  /* The result will be 6x the volume */
  return volume_tri_tetrahedron_signed_v3_6x(data->X[v1], data->X[v2], data->X[v3]);
}

/* Apply a pressure force along the normal of triangle (v1, v2, v3),
 * distributed over the three vertices according to `weights`. */
void BPH_mass_spring_force_pressure(
    Implicit_Data *data, int v1, int v2, int v3, float pressure_difference, float weights[3])
{
  float nor[3], area;
  float factor;

  /* calculate face normal and area */
  area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]);
  /* The force is calculated and split up evenly for each of the three face verts */
  factor = pressure_difference * area / 3.0f;

  /* add pressure to each of the face verts */
  madd_v3_v3fl(data->F[v1], nor, factor * weights[0]);
  madd_v3_v3fl(data->F[v2], nor, factor * weights[1]);
  madd_v3_v3fl(data->F[v3], nor, factor * weights[2]);
}

/* Wind force on one endpoint of an edge; the cross-section term looks like
 * the exposed area of a cylindrical segment (radius/length with the wind
 * angle) — writes the resulting force into `f`. Force jacobians are not
 * computed (the dfdx/dfdv parameters are unused). */
static void edge_wind_vertex(const float dir[3],
                             float length,
                             float radius,
                             const float wind[3],
                             float f[3],
                             float UNUSED(dfdx[3][3]),
                             float UNUSED(dfdv[3][3]))
{
  const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */
  float cos_alpha, sin_alpha, cross_section;
  float windlen = len_v3(wind);

  /* No wind, no force (also avoids dividing by a zero wind length below). */
  if (windlen == 0.0f) {
    zero_v3(f);
    return;
  }

  /* angle of wind direction to edge */
  cos_alpha = dot_v3v3(wind, dir) / windlen;
  sin_alpha = sqrtf(1.0f - cos_alpha * cos_alpha);
  cross_section = radius * ((float)M_PI * radius * sin_alpha + length * cos_alpha);

  mul_v3_v3fl(f, wind, density * cross_section);
}

/* Wind force acting on the edge (v1, v2), applied per endpoint with
 * possibly different radii at the two ends. */
void BPH_mass_spring_force_edge_wind(
    Implicit_Data *data, int v1, int v2, float radius1, float radius2, const float (*winvec)[3])
{
  float win[3], dir[3], length;
  float f[3], dfdx[3][3], dfdv[3][3];

  sub_v3_v3v3(dir,
data->X[v1], data->X[v2]);
  length = normalize_v3(dir);

  /* Per-endpoint wind force, using the root-space wind at each vertex. */
  world_to_root_v3(data, v1, win, winvec[v1]);
  edge_wind_vertex(dir, length, radius1, win, f, dfdx, dfdv);
  add_v3_v3(data->F[v1], f);

  world_to_root_v3(data, v2, win, winvec[v2]);
  edge_wind_vertex(dir, length, radius2, win, f, dfdx, dfdv);
  add_v3_v3(data->F[v2], f);
}

/* Wind force on a single vertex, simply proportional to the (root-space)
 * wind vector at that vertex. The radius parameter is currently unused. */
void BPH_mass_spring_force_vertex_wind(Implicit_Data *data,
                                       int v,
                                       float UNUSED(radius),
                                       const float (*winvec)[3])
{
  const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */

  float wind[3];
  float f[3];

  world_to_root_v3(data, v, wind, winvec[v]);
  mul_v3_v3fl(f, wind, density);
  add_v3_v3(data->F[v], f);
}

/* Force jacobian dF/dx of a linear spring with unit direction `dir`,
 * current length `length`, rest length `L` and stiffness `k`. */
BLI_INLINE void dfdx_spring(float to[3][3], const float dir[3], float length, float L, float k)
{
  // dir is unit length direction, rest is spring's restlength, k is spring constant.
  // return ( (I-outerprod(dir, dir))*Min(1.0f, rest/length) - I) * -k;
  outerproduct(to, dir, dir);
  sub_m3_m3m3(to, I, to);

  mul_m3_fl(to, (L / length));
  sub_m3_m3m3(to, to, I);
  mul_m3_fl(to, k);
}

/* unused */
# if 0
BLI_INLINE void dfdx_damp(float to[3][3],
                          const float dir[3],
                          float length,
                          const float vel[3],
                          float rest,
                          float damping)
{
  // inner spring damping: vel is the relative velocity of the endpoints.
// return (I-outerprod(dir, dir)) * (-damping * -(dot(dir, vel)/Max(length, rest))); mul_fvectorT_fvector(to, dir, dir); sub_fmatrix_fmatrix(to, I, to); mul_fmatrix_S(to, (-damping * -(dot_v3v3(dir, vel) / MAX2(length, rest)))); } # endif BLI_INLINE void dfdv_damp(float to[3][3], const float dir[3], float damping) { // derivative of force wrt velocity outerproduct(to, dir, dir); mul_m3_fl(to, -damping); } BLI_INLINE float fb(float length, float L) { float x = length / L; float xx = x * x; float xxx = xx * x; float xxxx = xxx * x; return (-11.541f * xxxx + 34.193f * xxx - 39.083f * xx + 23.116f * x - 9.713f); } BLI_INLINE float fbderiv(float length, float L) { float x = length / L; float xx = x * x; float xxx = xx * x; return (-46.164f * xxx + 102.579f * xx - 78.166f * x + 23.116f); } BLI_INLINE float fbstar(float length, float L, float kb, float cb) { float tempfb_fl = kb * fb(length, L); float fbstar_fl = cb * (length - L); if (tempfb_fl < fbstar_fl) { return fbstar_fl; } else { return tempfb_fl; } } // function to calculae bending spring force (taken from Choi & Co) BLI_INLINE float fbstar_jacobi(float length, float L, float kb, float cb) { float tempfb_fl = kb * fb(length, L); float fbstar_fl = cb * (length - L); if (tempfb_fl < fbstar_fl) { return -cb; } else { return -kb * fbderiv(length, L); } } /* calculate elonglation */ BLI_INLINE bool spring_length(Implicit_Data *data, int i, int j, float r_extent[3], float r_dir[3], float *r_length, float r_vel[3]) { sub_v3_v3v3(r_extent, data->X[j], data->X[i]); sub_v3_v3v3(r_vel, data->V[j], data->V[i]); *r_length = len_v3(r_extent); if (*r_length > ALMOST_ZERO) { # if 0 if (length > L) { if ((clmd->sim_parms->flags & CSIMSETT_FLAG_TEARING_ENABLED) && (((length - L) * 100.0f / L) > clmd->sim_parms->maxspringlen)) { // cut spring! 
        s->flags |= CSPRING_FLAG_DEACTIVATE;
        return false;
      }
    }
# endif
    mul_v3_v3fl(r_dir, r_extent, 1.0f / (*r_length));
  }
  else {
    zero_v3(r_dir);
  }

  return true;
}

/* Add the spring force `f` and its jacobians to both endpoints and to the
 * off-diagonal (i, j) block of the global force-derivative matrices. */
BLI_INLINE void apply_spring(
    Implicit_Data *data, int i, int j, const float f[3], float dfdx[3][3], float dfdv[3][3])
{
  int block_ij = BPH_mass_spring_add_block(data, i, j);

  /* Equal and opposite force on the two endpoints. */
  add_v3_v3(data->F[i], f);
  sub_v3_v3(data->F[j], f);

  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx);
  add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfdx);
  sub_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfdx);

  add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv);
  add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfdv);
  sub_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfdv);
}

/* Linear (structural) spring between vertices i and j: tension response
 * above the rest length (optionally clamped by `clamp_force`), and an
 * optional compression response below it. Returns false when the current
 * configuration produces no force. */
bool BPH_mass_spring_force_spring_linear(Implicit_Data *data,
                                         int i,
                                         int j,
                                         float restlen,
                                         float stiffness_tension,
                                         float damping_tension,
                                         float stiffness_compression,
                                         float damping_compression,
                                         bool resist_compress,
                                         bool new_compress,
                                         float clamp_force)
{
  float extent[3], length, dir[3], vel[3];
  float f[3], dfdx[3][3], dfdv[3][3];
  float damping = 0;

  // calculate elongation
  spring_length(data, i, j, extent, dir, &length, vel);

  /* This code computes not only the force, but also its derivative.
   * Zero derivative effectively disables the spring for the implicit solver.
   * Thus length > restlen makes cloth unconstrained at the start of simulation. */
  if ((length >= restlen && length > 0) || resist_compress) {
    float stretch_force;

    damping = damping_tension;

    stretch_force = stiffness_tension * (length - restlen);
    if (clamp_force > 0.0f && stretch_force > clamp_force) {
      stretch_force = clamp_force;
    }
    mul_v3_v3fl(f, dir, stretch_force);

    dfdx_spring(dfdx, dir, length, restlen, stiffness_tension);
  }
  else if (new_compress) {
    /* This is based on the Choi and Ko bending model,
     * which works surprisingly well for compression.
*/ float kb = stiffness_compression; float cb = kb; /* cb equal to kb seems to work, but a factor can be added if necessary */ damping = damping_compression; mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb)); outerproduct(dfdx, dir, dir); mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb)); } else { return false; } madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir)); dfdv_damp(dfdv, dir, damping); apply_spring(data, i, j, f, dfdx, dfdv); return true; } /* See "Stable but Responsive Cloth" (Choi, Ko 2005) */ bool BPH_mass_spring_force_spring_bending( Implicit_Data *data, int i, int j, float restlen, float kb, float cb) { float extent[3], length, dir[3], vel[3]; // calculate elonglation spring_length(data, i, j, extent, dir, &length, vel); if (length < restlen) { float f[3], dfdx[3][3], dfdv[3][3]; mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb)); outerproduct(dfdx, dir, dir); mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb)); /* XXX damping not supported */ zero_m3(dfdv); apply_spring(data, i, j, f, dfdx, dfdv); return true; } else { return false; } } BLI_INLINE void poly_avg(lfVector *data, int *inds, int len, float r_avg[3]) { float fact = 1.0f / (float)len; zero_v3(r_avg); for (int i = 0; i < len; i++) { madd_v3_v3fl(r_avg, data[inds[i]], fact); } } BLI_INLINE void poly_norm(lfVector *data, int i, int j, int *inds, int len, float r_dir[3]) { float mid[3]; poly_avg(data, inds, len, mid); normal_tri_v3(r_dir, data[i], data[j], mid); } BLI_INLINE void edge_avg(lfVector *data, int i, int j, float r_avg[3]) { r_avg[0] = (data[i][0] + data[j][0]) * 0.5f; r_avg[1] = (data[i][1] + data[j][1]) * 0.5f; r_avg[2] = (data[i][2] + data[j][2]) * 0.5f; } BLI_INLINE void edge_norm(lfVector *data, int i, int j, float r_dir[3]) { sub_v3_v3v3(r_dir, data[i], data[j]); normalize_v3(r_dir); } BLI_INLINE float bend_angle(float dir_a[3], float dir_b[3], float dir_e[3]) { float cos, sin; float tmp[3]; cos = dot_v3v3(dir_a, dir_b); cross_v3_v3v3(tmp, dir_a, dir_b); sin = 
dot_v3v3(tmp, dir_e); return atan2f(sin, cos); } BLI_INLINE void spring_angle(Implicit_Data *data, int i, int j, int *i_a, int *i_b, int len_a, int len_b, float r_dir_a[3], float r_dir_b[3], float *r_angle, float r_vel_a[3], float r_vel_b[3]) { float dir_e[3], vel_e[3]; poly_norm(data->X, j, i, i_a, len_a, r_dir_a); poly_norm(data->X, i, j, i_b, len_b, r_dir_b); edge_norm(data->X, i, j, dir_e); *r_angle = bend_angle(r_dir_a, r_dir_b, dir_e); poly_avg(data->V, i_a, len_a, r_vel_a); poly_avg(data->V, i_b, len_b, r_vel_b); edge_avg(data->V, i, j, vel_e); sub_v3_v3(r_vel_a, vel_e); sub_v3_v3(r_vel_b, vel_e); } /* Angular springs roughly based on the bending model proposed by Baraff and Witkin in "Large Steps * in Cloth Simulation". */ bool BPH_mass_spring_force_spring_angular(Implicit_Data *data, int i, int j, int *i_a, int *i_b, int len_a, int len_b, float restang, float stiffness, float damping) { float angle, dir_a[3], dir_b[3], vel_a[3], vel_b[3]; float f_a[3], f_b[3], f_e[3]; float force; int x; spring_angle(data, i, j, i_a, i_b, len_a, len_b, dir_a, dir_b, &angle, vel_a, vel_b); /* spring force */ force = stiffness * (angle - restang); /* damping force */ force += -damping * (dot_v3v3(vel_a, dir_a) + dot_v3v3(vel_b, dir_b)); mul_v3_v3fl(f_a, dir_a, force / len_a); mul_v3_v3fl(f_b, dir_b, force / len_b); for (x = 0; x < len_a; x++) { add_v3_v3(data->F[i_a[x]], f_a); } for (x = 0; x < len_b; x++) { add_v3_v3(data->F[i_b[x]], f_b); } mul_v3_v3fl(f_a, dir_a, force * 0.5f); mul_v3_v3fl(f_b, dir_b, force * 0.5f); add_v3_v3v3(f_e, f_a, f_b); sub_v3_v3(data->F[i], f_e); sub_v3_v3(data->F[j], f_e); return true; } /* Jacobian of a direction vector. * Basically the part of the differential orthogonal to the direction, * inversely proportional to the length of the edge. 
* * dD_ij/dx_i = -dD_ij/dx_j = (D_ij * D_ij^T - I) / len_ij */ BLI_INLINE void spring_grad_dir( Implicit_Data *data, int i, int j, float edge[3], float dir[3], float grad_dir[3][3]) { float length; sub_v3_v3v3(edge, data->X[j], data->X[i]); length = normalize_v3_v3(dir, edge); if (length > ALMOST_ZERO) { outerproduct(grad_dir, dir, dir); sub_m3_m3m3(grad_dir, I, grad_dir); mul_m3_fl(grad_dir, 1.0f / length); } else { zero_m3(grad_dir); } } BLI_INLINE void spring_hairbend_forces(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, const float dx[3], const float dv[3], float r_f[3]) { float edge_ij[3], dir_ij[3]; float edge_jk[3], dir_jk[3]; float vel_ij[3], vel_jk[3], vel_ortho[3]; float f_bend[3], f_damp[3]; float fk[3]; float dist[3]; zero_v3(fk); sub_v3_v3v3(edge_ij, data->X[j], data->X[i]); if (q == i) { sub_v3_v3(edge_ij, dx); } if (q == j) { add_v3_v3(edge_ij, dx); } normalize_v3_v3(dir_ij, edge_ij); sub_v3_v3v3(edge_jk, data->X[k], data->X[j]); if (q == j) { sub_v3_v3(edge_jk, dx); } if (q == k) { add_v3_v3(edge_jk, dx); } normalize_v3_v3(dir_jk, edge_jk); sub_v3_v3v3(vel_ij, data->V[j], data->V[i]); if (q == i) { sub_v3_v3(vel_ij, dv); } if (q == j) { add_v3_v3(vel_ij, dv); } sub_v3_v3v3(vel_jk, data->V[k], data->V[j]); if (q == j) { sub_v3_v3(vel_jk, dv); } if (q == k) { add_v3_v3(vel_jk, dv); } /* bending force */ sub_v3_v3v3(dist, goal, edge_jk); mul_v3_v3fl(f_bend, dist, stiffness); add_v3_v3(fk, f_bend); /* damping force */ madd_v3_v3v3fl(vel_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk)); mul_v3_v3fl(f_damp, vel_ortho, damping); sub_v3_v3(fk, f_damp); copy_v3_v3(r_f, fk); } /* Finite Differences method for estimating the jacobian of the force */ BLI_INLINE void spring_hairbend_estimate_dfdx(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, float dfdx[3][3]) { const float delta = 0.00001f; // TODO find a good heuristic for this float dvec_null[3][3], 
dvec_pos[3][3], dvec_neg[3][3]; float f[3]; int a, b; zero_m3(dvec_null); unit_m3(dvec_pos); mul_m3_fl(dvec_pos, delta * 0.5f); copy_m3_m3(dvec_neg, dvec_pos); negate_m3(dvec_neg); /* XXX TODO offset targets to account for position dependency */ for (a = 0; a < 3; a++) { spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_pos[a], dvec_null[a], f); copy_v3_v3(dfdx[a], f); spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_neg[a], dvec_null[a], f); sub_v3_v3(dfdx[a], f); for (b = 0; b < 3; b++) { dfdx[a][b] /= delta; } } } /* Finite Differences method for estimating the jacobian of the force */ BLI_INLINE void spring_hairbend_estimate_dfdv(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, float dfdv[3][3]) { const float delta = 0.00001f; // TODO find a good heuristic for this float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3]; float f[3]; int a, b; zero_m3(dvec_null); unit_m3(dvec_pos); mul_m3_fl(dvec_pos, delta * 0.5f); copy_m3_m3(dvec_neg, dvec_pos); negate_m3(dvec_neg); /* XXX TODO offset targets to account for position dependency */ for (a = 0; a < 3; a++) { spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_pos[a], f); copy_v3_v3(dfdv[a], f); spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_neg[a], f); sub_v3_v3(dfdv[a], f); for (b = 0; b < 3; b++) { dfdv[a][b] /= delta; } } } /* Angular spring that pulls the vertex toward the local target * See "Artistic Simulation of Curly Hair" (Pixar technical memo #12-03a) */ bool BPH_mass_spring_force_spring_bending_hair(Implicit_Data *data, int i, int j, int k, const float target[3], float stiffness, float damping) { float goal[3]; float fj[3], fk[3]; float dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3]; float dfj_dvi[3][3], dfj_dvj[3][3], dfk_dvi[3][3], dfk_dvj[3][3], dfk_dvk[3][3]; const float vecnull[3] = {0.0f, 0.0f, 
0.0f}; int block_ij = BPH_mass_spring_add_block(data, i, j); int block_jk = BPH_mass_spring_add_block(data, j, k); int block_ik = BPH_mass_spring_add_block(data, i, k); world_to_root_v3(data, j, goal, target); spring_hairbend_forces(data, i, j, k, goal, stiffness, damping, k, vecnull, vecnull, fk); negate_v3_v3(fj, fk); /* counterforce */ spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, i, dfk_dxi); spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, j, dfk_dxj); spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, k, dfk_dxk); copy_m3_m3(dfj_dxi, dfk_dxi); negate_m3(dfj_dxi); copy_m3_m3(dfj_dxj, dfk_dxj); negate_m3(dfj_dxj); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, i, dfk_dvi); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, j, dfk_dvj); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, k, dfk_dvk); copy_m3_m3(dfj_dvi, dfk_dvi); negate_m3(dfj_dvi); copy_m3_m3(dfj_dvj, dfk_dvj); negate_m3(dfj_dvj); /* add forces and jacobians to the solver data */ add_v3_v3(data->F[j], fj); add_v3_v3(data->F[k], fk); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj); add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk); add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi); add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj); add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi); add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfj_dvj); add_m3_m3m3(data->dFdV[k].m, data->dFdV[k].m, dfk_dvk); add_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfj_dvi); add_m3_m3m3(data->dFdV[block_jk].m, data->dFdV[block_jk].m, dfk_dvj); add_m3_m3m3(data->dFdV[block_ik].m, data->dFdV[block_ik].m, dfk_dvi); /* XXX analytical calculation of derivatives below is incorrect. * This proved to be difficult, but for now just using the finite difference method for * estimating the jacobians should be sufficient. 
*/ # if 0 float edge_ij[3], dir_ij[3], grad_dir_ij[3][3]; float edge_jk[3], dir_jk[3], grad_dir_jk[3][3]; float dist[3], vel_jk[3], vel_jk_ortho[3], projvel[3]; float target[3]; float tmp[3][3]; float fi[3], fj[3], fk[3]; float dfi_dxi[3][3], dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3]; float dfdvi[3][3]; // TESTING damping = 0.0f; zero_v3(fi); zero_v3(fj); zero_v3(fk); zero_m3(dfi_dxi); zero_m3(dfj_dxi); zero_m3(dfk_dxi); zero_m3(dfk_dxj); zero_m3(dfk_dxk); /* jacobian of direction vectors */ spring_grad_dir(data, i, j, edge_ij, dir_ij, grad_dir_ij); spring_grad_dir(data, j, k, edge_jk, dir_jk, grad_dir_jk); sub_v3_v3v3(vel_jk, data->V[k], data->V[j]); /* bending force */ mul_v3_v3fl(target, dir_ij, restlen); sub_v3_v3v3(dist, target, edge_jk); mul_v3_v3fl(fk, dist, stiffness); /* damping force */ madd_v3_v3v3fl(vel_jk_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk)); madd_v3_v3fl(fk, vel_jk_ortho, damping); /* XXX this only holds true as long as we assume straight rest shape! * eventually will become a bit more involved since the opposite segment * gets its own target, under condition of having equal torque on both sides. 
*/ copy_v3_v3(fi, fk); /* counterforce on the middle point */ sub_v3_v3(fj, fi); sub_v3_v3(fj, fk); /* === derivatives === */ madd_m3_m3fl(dfk_dxi, grad_dir_ij, stiffness * restlen); madd_m3_m3fl(dfk_dxj, grad_dir_ij, -stiffness * restlen); madd_m3_m3fl(dfk_dxj, I, stiffness); madd_m3_m3fl(dfk_dxk, I, -stiffness); copy_m3_m3(dfi_dxi, dfk_dxk); negate_m3(dfi_dxi); /* dfj_dfi == dfi_dfj due to symmetry, * dfi_dfj == dfk_dfj due to fi == fk * XXX see comment above on future bent rest shapes */ copy_m3_m3(dfj_dxi, dfk_dxj); /* dfj_dxj == -(dfi_dxj + dfk_dxj) due to fj == -(fi + fk) */ sub_m3_m3m3(dfj_dxj, dfj_dxj, dfj_dxi); sub_m3_m3m3(dfj_dxj, dfj_dxj, dfk_dxj); /* add forces and jacobians to the solver data */ add_v3_v3(data->F[i], fi); add_v3_v3(data->F[j], fj); add_v3_v3(data->F[k], fk); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfi_dxi); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj); add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk); add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi); add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj); add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi); # endif return true; } bool BPH_mass_spring_force_spring_goal(Implicit_Data *data, int i, const float goal_x[3], const float goal_v[3], float stiffness, float damping) { float root_goal_x[3], root_goal_v[3], extent[3], length, dir[3], vel[3]; float f[3], dfdx[3][3], dfdv[3][3]; /* goal is in world space */ world_to_root_v3(data, i, root_goal_x, goal_x); world_to_root_v3(data, i, root_goal_v, goal_v); sub_v3_v3v3(extent, root_goal_x, data->X[i]); sub_v3_v3v3(vel, root_goal_v, data->V[i]); length = normalize_v3_v3(dir, extent); if (length > ALMOST_ZERO) { mul_v3_v3fl(f, dir, stiffness * length); // Ascher & Boxman, p.21: Damping only during elonglation // something wrong with it... 
    madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir));

    /* Jacobians of the goal spring: rest length 0 for dF/dx, plain
     * directional damping for dF/dv. */
    dfdx_spring(dfdx, dir, length, 0.0f, stiffness);
    dfdv_damp(dfdv, dir, damping);

    /* Accumulate the goal force and its jacobians for vertex i only;
     * there is no opposite endpoint for a goal spring. */
    add_v3_v3(data->F[i], f);
    add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx);
    add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv);

    return true;
  }
  else {
    /* Degenerate spring (vertex already at the goal): no force applied. */
    return false;
  }
}

#endif /* IMPLICIT_SOLVER_BLENDER */
/* mxnet_op.h */
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #include "./operator_tune.h" #include "../engine/openmp.h" #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! 
* \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } /*! \brief operator request type switch */ #define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ { \ const OpReqType ReqType = kNullOp; \ {__VA_ARGS__} \ } \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } #define MXNET_NDIM_SWITCH(NDim, ndim, ...) \ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) 
\
  if (NDim == 0) { \
  } else if (NDim == 1) { \
    const int ndim = 1; \
    {__VA_ARGS__} \
  } else if (NDim == 2) { \
    const int ndim = 2; \
    {__VA_ARGS__} \
  } else if (NDim == 3) { \
    const int ndim = 3; \
    {__VA_ARGS__} \
  } else if (NDim == 4) { \
    const int ndim = 4; \
    {__VA_ARGS__} \
  } else if (NDim == 5) { \
    const int ndim = 5; \
    {__VA_ARGS__} \
  } else if (NDim == 6) { \
    const int ndim = 6; \
    {__VA_ARGS__} \
  } else if (NDim == 7) { \
    const int ndim = 7; \
    {__VA_ARGS__} \
  } else if (NDim == 8) { \
    const int ndim = 8; \
    {__VA_ARGS__} \
  } else if (NDim == 9) { \
    const int ndim = 9; \
    {__VA_ARGS__} \
  } else if (NDim == 10) { \
    const int ndim = 10; \
    {__VA_ARGS__} \
  } else { \
    LOG(FATAL) << "ndim=" << NDim << "too large "; \
  }
/* NOTE(review): the message above renders as e.g. "ndim=11too large " -- the
 * literal is missing a separating space.  Left untouched here because it is a
 * runtime string; fix the literal in a behavioural change. */

/*!
 * \brief Runtime type-dispatch switch: instantiates the body __VA_ARGS__ with
 * `DType` bound to the concrete C++ type for `type`.  This variant supports
 * float32/float64/float16/int32/int64 but aborts on int8/uint8 (for operators
 * that have no 8-bit kernels).
 */
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kUint8: \
    LOG(FATAL) << "This operation does not " \
                  "support int8 or uint8"; \
    break; \
  case mshadow::kInt8: \
    LOG(FATAL) << "This operation does not " \
                  "support int8 or uint8"; \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

/*!
 * \brief Runtime type-dispatch switch like the one above, but rejecting
 * float16 instead of the 8-bit integer types.
 */
#define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    LOG(FATAL) << "This operation does not " \
                  "support float16"; \
    break; \
  case mshadow::kUint8: \
    { \
      typedef uint8_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt8: \
    { \
      typedef int8_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

/*! \brief Accumulation type trait: the type in which sums of T are
 *  accumulated.  half_t accumulates in float; every other type accumulates
 *  in itself. */
template <typename T>
struct AccType {
  using type = T;
};

template <>
struct AccType<mshadow::half::half_t> {
  using type = float;
};

/*!
 * \brief Type switch binding both the element type `DType` and a *real*
 * accumulation type `AType` (float64 for float32, float for float16, ...).
 * Integer and bool inputs abort: the operation is floating-point only.
 */
#define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      typedef double AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      typedef double AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      typedef float AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kUint8: \
    { \
      typedef uint8_t DType; \
      typedef uint8_t AType; \
      LOG(FATAL) << "This operation only support " \
                    "floating point types not uint8"; \
    } \
    break; \
  case mshadow::kInt8: \
    { \
      typedef int8_t DType; \
      typedef int8_t AType; \
      LOG(FATAL) << "This operation only support " \
                    "floating point types not int8"; \
    } \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      typedef int32_t AType; \
      LOG(FATAL) << "This operation only support " \
                    "floating point types, not int32"; \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      typedef int64_t AType; \
      LOG(FATAL) << "This operation only support " \
                    "floating point types, not int64"; \
    } \
    break; \
  case mshadow::kBool: \
    { \
      typedef bool DType; \
      typedef int64_t AType; \
      LOG(FATAL) << "This operation only support " \
                    "floating point types, not bool"; \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

/*!
 * \brief Type switch binding `DType` and a widened accumulation type `AType`
 * for every supported type, including the integer and bool types
 * (uint8 -> uint32, int8 -> int32, int32/bool -> int64).
 */
#define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      typedef double AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      typedef double AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      typedef float AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kUint8: \
    { \
      typedef uint8_t DType; \
      typedef uint32_t AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt8: \
    { \
      typedef int8_t DType; \
      typedef int32_t AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      typedef int64_t AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      typedef int64_t AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kBool: \
    { \
      typedef bool DType; \
      typedef int64_t AType; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

/*!
 * \brief Type switch for integer-only operations (uint8/int8/int32/int64/bool);
 * aborts on any floating-point input.
 */
#define MXNET_INT_TYPE_SWITCH(type, DType, ...)\
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      LOG(FATAL) << "This operation only support " \
                    "integer types, not float32"; \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      LOG(FATAL) << "This operation only support " \
                    "integer types, not float64"; \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      LOG(FATAL) << "This operation only support " \
                    "integer types, not float16"; \
    } \
    break; \
  case mshadow::kUint8: \
    { \
      typedef uint8_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt8: \
    { \
      typedef int8_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kBool: \
    { \
      typedef bool DType; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

/*!
 * \brief Type switch accepting only int32 and int64 (e.g. index tensors);
 * every other dtype aborts.
 */
#define MXNET_INT32_INT64_TYPE_SWITCH(type, DType, ...)\
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      LOG(FATAL) << "This operation only support " \
                    "integer types, not float32"; \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      LOG(FATAL) << "This operation only support " \
                    "integer types, not float64"; \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      LOG(FATAL) << "This operation only support " \
                    "integer types, not float16"; \
    } \
    break; \
  case mshadow::kUint8: \
    { \
      LOG(FATAL) << "This operation only support " \
                    "integer types, not uint8"; \
    } \
    break; \
  case mshadow::kInt8: \
    { \
      LOG(FATAL) << "This operation only support " \
                    "integer types, not int8"; \
    } \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kBool: \
    { \
      LOG(FATAL) << "This operation only support " \
                    "integer types, not bool"; \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

/*!
 * \brief Type switch for loadable storage types
 * (float32/float64/float16/uint8 only).
 */
#define MXNET_LOAD_TYPE_SWITCH(type, DType, ...)  \
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kUint8: \
    { \
      typedef uint8_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    LOG(FATAL) << "Invalid loading enum type " << type; \
  }

/*!
 * \brief assign the val to out according
 * to request in Kernel::Launch
 * \param out the data to be assigned
 * \param req the assignment request
 * \param val the value to be assigned to out
 * \tparam OType output type
 * \tparam VType value type
 */
#define KERNEL_ASSIGN(out, req, val)  \
  { \
    switch (req) { \
      case kNullOp: \
        break; \
      case kWriteTo: \
      case kWriteInplace: \
        (out) = (val); \
        break; \
      case kAddTo: \
        (out) += (val); \
        break; \
      default: \
        break; \
    } \
  }

/*! \brief Register all non-bool dtypes as enum values of an operator
 *  parameter (chained .add_enum calls on a dmlc parameter field). */
#define MXNET_ADD_ALL_TYPES \
  .add_enum("float32", mshadow::kFloat32) \
  .add_enum("float64", mshadow::kFloat64) \
  .add_enum("float16", mshadow::kFloat16) \
  .add_enum("uint8", mshadow::kUint8) \
  .add_enum("int8", mshadow::kInt8) \
  .add_enum("int32", mshadow::kInt32) \
  .add_enum("int64", mshadow::kInt64)

/*! \brief Same as MXNET_ADD_ALL_TYPES, additionally registering bool. */
#define MXNET_ADD_ALL_TYPES_WITH_BOOL \
  .add_enum("float32", mshadow::kFloat32) \
  .add_enum("float64", mshadow::kFloat64) \
  .add_enum("float16", mshadow::kFloat16) \
  .add_enum("uint8", mshadow::kUint8) \
  .add_enum("int8", mshadow::kInt8) \
  .add_enum("int32", mshadow::kInt32) \
  .add_enum("int64", mshadow::kInt64) \
  .add_enum("bool", mshadow::kBool)

/* \brief Compute flattened index given coordinates and shape.
*/
/* Row-major flattening of `coord` within `shape`.  The factor
 * (shape[i] > coord[i]) zeroes coordinates on broadcast axes: when
 * shape[i] == 1 the incoming coordinate may exceed it and must contribute 0. */
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
  }
  return ret;
}

/* Compute coordinates from flattened index given shape.
 * Walks axes from innermost to outermost using div/mod.
 * NOTE(review): the `i >= 0` termination assumes index_t is signed -- TODO
 * confirm against the index_t typedef. */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret[i] = j - tmp*shape[i];   // remainder = coordinate along axis i
    j = tmp;
  }
  return ret;
}

/* Compute dot product of two vector: the linear offset of coordinate `coord`
 * under per-axis strides `stride`. */
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret += coord[i] * stride[i];
  }
  return ret;
}

/* Combining unravel and dot: converts a flat index in `shape` directly to an
 * offset under `stride` without materializing the coordinate vector. */
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
  const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret += (j - tmp*shape[i])*stride[i];
    j = tmp;
  }
  return ret;
}

/* Calculate stride of each dim from shape.  Axes of extent 1 get stride 0 so
 * that dot()/unravel_dot() broadcast over them for free. */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}

/* Increment coordinates: odometer-style advance of *coord within `shape`;
 * returns true while the coordinate is still in range. */
template<int ndim>
MSHADOW_XINLINE bool inc(Shape<ndim>* coord, const Shape<ndim>& shape) {
  ++(*coord)[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];   // carry into the next-slower axis
    ++(*coord)[i-1];
  }
  return (*coord)[0] < shape[0];
}

/* Increment coordinates and modify index: keeps the strided offset *idx in
 * sync with the coordinate as it advances. */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx, const Shape<ndim>& stride) {
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    // undo the full-axis advance on axis i, take one step on axis i-1
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}

/* Increment coordinates and modify index: same as above for two synchronized
 * strided offsets (e.g. input and output tensors walked in lockstep). */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx1, const Shape<ndim>& stride1,
                         index_t* idx2, const Shape<ndim>& stride2) {
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}

/*!
 * \brief Simple copy data from one blob to another
 * \param to Destination blob
 * \param from Source blob
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH_WITH_BOOL(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      // same dtype: raw element copy
      mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
    } else {
      // differing dtypes: element-wise cast via an mshadow expression
      MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) =
          mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}

/*!
\brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad
   * \param a - output grad
   * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
   * \return input grad
   */
  template<typename DType, typename ...Args>
  MSHADOW_XINLINE static DType Map(DType a, Args... args) {
    // chain rule: incoming gradient times the op's local gradient
    return DType(a * GRAD_OP::Map(args...));
  }
};

/*! \brief Binary op backward gradient OP wrapper (tuned):
 *  identical Map(), but inheriting `tunable` routes it through the
 *  OMP-tuned Kernel<>::Launch overloads below. */
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
  using backward_grad<GRAD_OP>::Map;
};

/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
 * Each Map() applies OP element-wise and writes through KERNEL_ASSIGN,
 * honouring kNullOp/kWriteTo/kWriteInplace/kAddTo. */
template<typename OP, int req>
struct op_with_req {
  typedef OP Operation;

  /*! \brief input is one tensor */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is tensor and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  /*! \brief input is tensor and two scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in,
                                  const DType value_1, const DType value_2) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
  }

  /*! \brief No inputs (ie fill to constant value) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }

  /*! \brief input is single scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }

  /*! \brief inputs are two tensors and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1, const DType *input_2, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }

  /*! \brief inputs are three tensors (ie backward grad with binary grad function) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1, const DType *input_2, const DType *input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }

  /*! \brief input is a tensor and the output is a boolean tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors with a boolean output tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is a tensor and one scalar value with a boolean output tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  // The overloads below implement mixed-precision outputs (integer or
  // lower-precision inputs with a wider floating-point output).
#ifndef _WIN32
  /*! \brief inputs are two tensors with a half_t output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, mshadow::half::half_t *out,
                                  const DType *lhs, const mshadow::half::half_t *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief inputs are two tensors with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                   std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const float *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief inputs are two tensors with a double output tensor */
  template<typename DType,
           typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                   std::is_same<DType, float>::value ||
                                   std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is a tensor and a half_t scalar with a half_t output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, mshadow::half::half_t *out,
                                  const DType *lhs, const mshadow::half::half_t value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
  }

  /*! \brief input is a tensor and a float scalar with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                   std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const float value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
  }

  /*! \brief input is a tensor and a double scalar with a double output tensor */
  template<typename DType,
           typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                   std::is_same<DType, float>::value ||
                                   std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
  }
#endif

  /*! \brief inputs are two integral tensors with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is an integral tensor and a scalar value with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
};

template<typename OP, typename xpu>
struct Kernel;

/*!
 * \brief CPU Kernel launcher
 * \tparam OP Operator to launch
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      // serial fallback when OMP is not worth the overhead
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
   * for irregular workloads such as spmv.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
    if (omp_threads < 2) {
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      // dynamic schedule: iterations of unequal cost are load-balanced
      #pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int64_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   *             (the first of these is the destination pointer, which fixes DType)
   */
  template<typename PRIMITIVE_OP, typename DType, typename ...Args>
  static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    // tuned_op decides, from measured per-element cost, whether OMP pays off
    if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
      N, static_cast<size_t>(omp_threads))) {
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }

  /*!
   * \brief Launch custom-tuned kernel where each thread is set to
   * operate on a contiguous partition
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
   */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      OP::Map(0, N, args...);
    } else {
      // split [0, N) into omp_threads contiguous chunks of size `length`;
      // the ternary clips the final (possibly short) chunk
      const auto length = (N + omp_threads - 1) / omp_threads;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
        OP::Map(i, i + length > N ? N - i : length, args...);
      }
    }
#else
    OP::Map(0, N, args...);
#endif
  }

  /*!
   * \brief Launch a tunable OP with implicitly-supplied data type
   * \tparam DType Data type
   * \tparam T OP type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    LaunchTuned<T, DType>(s, N, dest, args...);
    return true;
  }

  /*!
   * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
   * \tparam DType Data type
   * \tparam T Wrapper type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
    return true;
  }
};

#ifdef __CUDACC__
/* Generic GPU kernel: each thread strides over the index range so any N is
 * covered regardless of grid size. */
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}

/* Variant for OPs whose Map takes (index, count, ...); invoked with count 1. */
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, 1, args...);
  }
}

/*! \brief GPU Kernel launcher specialization */
template<typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch GPU kernel */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    if (0 == N) return;   // empty launch is invalid in CUDA
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
  }

  /*! \brief Launch the (index, count, ...) form of the kernel */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
  }
};
#endif  // __CUDACC__

/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template<int val>
struct set_to_int : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch()) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};

/*!
 * \brief Special-case kernel shortcut for setting to zero and one
 */
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
}  // namespace mxnet_op
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_MXNET_OP_H_
symm.c
/** * This version is stamped on May 10, 2016 * * Contact: * Louis-Noel Pouchet <pouchet.ohio-state.edu> * Tomofumi Yuki <tomofumi.yuki.fr> * * Web address: http://polybench.sourceforge.net */ /* symm.c: this file is part of PolyBench/C */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ #include "symm.h" /* Array initialization. */ static void init_array(int m, int n, DATA_TYPE *alpha, DATA_TYPE *beta, DATA_TYPE POLYBENCH_2D(C, M, N, m, n), DATA_TYPE POLYBENCH_2D(A, M, M, m, m), DATA_TYPE POLYBENCH_2D(B, M, N, m, n)) { int i, j; *alpha = 1.5; *beta = 1.2; for (i = 0; i < m; i++) for (j = 0; j < n; j++) { C[i][j] = (DATA_TYPE) ((i + j) % 100) / m ; B[i][j] = (DATA_TYPE) ((n + i - j) % 100) / m ; } for (i = 0; i < m; i++) { for (j = 0; j <= i; j++) A[i][j] = (DATA_TYPE) ((i + j) % 100) / m ; for (j = i + 1; j < m; j++) A[i][j] = -999; //regions of arrays that should not be used } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, int n, DATA_TYPE POLYBENCH_2D(C, M, N, m, n)) { int i, j; POLYBENCH_DUMP_START; POLYBENCH_DUMP_BEGIN("C"); for (i = 0; i < m; i++) for (j = 0; j < n; j++) { if ((i * m + j) % 20 == 0) fprintf (POLYBENCH_DUMP_TARGET, "\n"); fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, C[i][j]); } POLYBENCH_DUMP_END("C"); POLYBENCH_DUMP_FINISH; } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/
/* symm kernel: C := alpha*A*B + beta*C for an m x m matrix A of which only
   the lower part A[i][k], k <= i, is read (BLAS SYMM-style update; A is
   presumably symmetric with only one triangle stored -- see init_array,
   which poisons the upper triangle).  B and C are m x n.

   Parallelization: for each row i, the inner j loop (columns) is split over
   OpenMP threads.  Every thread owns a distinct column j, so the writes to
   C[k][j] (k < i) and C[i][j] never collide across threads.  temp2 is the
   per-(i, j) partial dot product and is private per thread. */
static void kernel_symm(int m, int n,
   DATA_TYPE alpha,
   DATA_TYPE beta,
   DATA_TYPE POLYBENCH_2D(C, M, N, m, n),
   DATA_TYPE POLYBENCH_2D(A, M, M, m, m),
   DATA_TYPE POLYBENCH_2D(B, M, N, m, n))
{
  int i, j, k;
  DATA_TYPE temp2;
  for (i = 0; i < _PB_M; i++) {
    /* C stays shared (each thread touches disjoint columns); the scalars and
       the A/B pointers are copied per thread via firstprivate. */
#pragma omp parallel for default(shared) private(j, k, temp2) firstprivate(n, i, alpha, beta, B, A)
    for (j = 0; j < _PB_N; j++ ) {
      temp2 = 0;
      for (k = 0; k < i; k++) {
        /* contribution of A[i][k] to row k (the mirrored triangle entry) */
        C[k][j] += alpha * B[i][j] * A[i][k];
        temp2 += B[k][j] * A[i][k];
      }
      C[i][j] = beta * C[i][j] + alpha * B[i][j] * A[i][i] + alpha * temp2;
    }
  }
}

/* Benchmark driver: allocate, initialize, time the kernel, dump C for DCE. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int m = M;
  int n = N;

  /* Variable declaration/allocation. */
  DATA_TYPE alpha;
  DATA_TYPE beta;
  POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, M, N, m, n);
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, M, M, m, m);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, M, N, m, n);

  /* Initialize array(s). */
  init_array (m, n, &alpha, &beta,
              POLYBENCH_ARRAY(C),
              POLYBENCH_ARRAY(A),
              POLYBENCH_ARRAY(B));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel. */
  kernel_symm (m, n,
               alpha, beta,
               POLYBENCH_ARRAY(C),
               POLYBENCH_ARRAY(A),
               POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(m, n, POLYBENCH_ARRAY(C)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(C);
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
displacement_lagrangemultiplier_contact_criteria.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H) #define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "utilities/table_stream_utility.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "utilities/color_utilities.h" #include "utilities/constraint_utilities.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@name Kratos Classes ///@{ /** * @class DisplacementLagrangeMultiplierContactCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Convergence criteria for contact problems * @details This class implements a convergence control based on nodal displacement and * lagrange multiplier values. The error is evaluated separately for each of them, and * relative and absolute tolerances for both must be specified. 
 * @author Vicente Mataix Ferrandiz
 */
template< class TSparseSpace, class TDenseSpace >
class DisplacementLagrangeMultiplierContactCriteria
    : public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of DisplacementLagrangeMultiplierContactCriteria
    KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierContactCriteria );

    /// Local Flags
    KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT );       // treat total contact loss as an error
    KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );      // plain-text (no color codes) output
    KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED ); // table columns already registered

    /// The base class definition (and it subclasses)
    typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
    typedef typename BaseType::TDataType                     TDataType;
    typedef typename BaseType::DofsArrayType             DofsArrayType;
    typedef typename BaseType::TSystemMatrixType     TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType     TSystemVectorType;

    /// The sparse space used
    typedef TSparseSpace                              SparseSpaceType;

    /// The r_table stream definition TODO: Replace by logger
    typedef TableStreamUtility::Pointer       TablePrinterPointerType;

    /// The index type definition
    typedef std::size_t                                     IndexType;

    /// The key type definition
    typedef std::size_t                                       KeyType;

    /// The epsilon tolerance definition (used as the "numerically zero"
    /// threshold when guarding ratio denominators)
    static constexpr double Tolerance = std::numeric_limits<double>::epsilon();

    ///@}
    ///@name Life Cycle
    ///@{

    /// Constructor.
/** * @param DispRatioTolerance Relative tolerance for displacement error * @param DispAbsTolerance Absolute tolerance for displacement error * @param LMRatioTolerance Relative tolerance for lagrange multiplier error * @param LMAbsTolerance Absolute tolerance for lagrange multiplier error * @param EnsureContact To check if the contact is lost * @param pTable The pointer to the output r_table * @param PrintingOutput If the output is going to be printed in a txt file */ explicit DisplacementLagrangeMultiplierContactCriteria( const TDataType DispRatioTolerance, const TDataType DispAbsTolerance, const TDataType LMRatioTolerance, const TDataType LMAbsTolerance, const bool EnsureContact = false, const bool PrintingOutput = false ) : BaseType() { // Set local flags mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT, EnsureContact); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT, PrintingOutput); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, false); // The displacement solution mDispRatioTolerance = DispRatioTolerance; mDispAbsTolerance = DispAbsTolerance; // The contact solution mLMRatioTolerance = LMRatioTolerance; mLMAbsTolerance = LMAbsTolerance; } /** * @brief Default constructor (parameters) * @param ThisParameters The configuration parameters */ explicit DisplacementLagrangeMultiplierContactCriteria( Parameters ThisParameters = Parameters(R"({})")) : BaseType() { // The default parameters Parameters default_parameters = Parameters(R"( { "ensure_contact" : false, "print_convergence_criterion" : false, "displacement_relative_tolerance" : 1.0e-4, "displacement_absolute_tolerance" : 1.0e-9, "contact_displacement_relative_tolerance" : 1.0e-4, "contact_displacement_absolute_tolerance" : 1.0e-9 })" ); ThisParameters.ValidateAndAssignDefaults(default_parameters); // The displacement solution mDispRatioTolerance = ThisParameters["displacement_relative_tolerance"].GetDouble(); 
mDispAbsTolerance = ThisParameters["displacement_absolute_tolerance"].GetDouble(); // The contact solution mLMRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble(); mLMAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble(); // Set local flags mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, false); } // Copy constructor. DisplacementLagrangeMultiplierContactCriteria( DisplacementLagrangeMultiplierContactCriteria const& rOther ) :BaseType(rOther) ,mOptions(rOther.mOptions) ,mDispRatioTolerance(rOther.mDispRatioTolerance) ,mDispAbsTolerance(rOther.mDispAbsTolerance) ,mLMRatioTolerance(rOther.mLMRatioTolerance) ,mLMAbsTolerance(rOther.mLMAbsTolerance) { } /// Destructor. ~DisplacementLagrangeMultiplierContactCriteria() override = default; ///@} ///@name Operators ///@{ /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { if (SparseSpaceType::Size(rDx) != 0) { //if we are solving for something // Initialize TDataType disp_solution_norm = 0.0, lm_solution_norm = 0.0, disp_increase_norm = 0.0, lm_increase_norm = 0.0; IndexType disp_dof_num(0),lm_dof_num(0); // First iterator const auto it_dof_begin = rDofSet.begin(); // Auxiliar values std::size_t dof_id = 0; TDataType dof_value = 0.0, dof_incr = 0.0; // The number of active dofs const std::size_t number_active_dofs = rb.size(); // Loop over Dofs #pragma omp parallel for firstprivate(dof_id, dof_value ,dof_incr) reduction(+:disp_solution_norm, lm_solution_norm, disp_increase_norm, lm_increase_norm, disp_dof_num, lm_dof_num) for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) { auto it_dof = it_dof_begin + i; dof_id = it_dof->EquationId(); // Check dof id is solved if (dof_id < number_active_dofs) { if (mActiveDofs[dof_id]) { dof_value = it_dof->GetSolutionStepValue(0); dof_incr = rDx[dof_id]; const auto& r_curr_var = it_dof->GetVariable(); if ((r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (r_curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) { lm_solution_norm += dof_value * dof_value; lm_increase_norm += dof_incr * dof_incr; lm_dof_num++; } else { disp_solution_norm += dof_value * dof_value; disp_increase_norm += dof_incr * dof_incr; disp_dof_num++; } } } } if(disp_increase_norm < Tolerance) disp_increase_norm = 1.0; if(lm_increase_norm < Tolerance) lm_increase_norm 
= 1.0; if(disp_solution_norm < Tolerance) disp_solution_norm = 1.0; KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) << "WARNING::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl; const TDataType disp_ratio = std::sqrt(disp_increase_norm/disp_solution_norm); const TDataType lm_ratio = lm_solution_norm > Tolerance ? std::sqrt(lm_increase_norm/lm_solution_norm) : 0.0; const TDataType disp_abs = std::sqrt(disp_increase_norm)/static_cast<TDataType>(disp_dof_num); const TDataType lm_abs = std::sqrt(lm_increase_norm)/static_cast<TDataType>(lm_dof_num); // The process info of the model part ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // We print the results // TODO: Replace for the new log if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { std::cout.precision(4); TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << lm_ratio << mLMRatioTolerance << lm_abs << mLMAbsTolerance; } else { std::cout.precision(4); if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) { KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("DoF ONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT(" LAGRANGE MUL:\tRATIO = ") << lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << lm_abs << BOLDFONT(" 
EXP.ABS = ") << mLMAbsTolerance << std::endl; } else { KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "DoF ONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDISPLACEMENT: RATIO = " << disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << " LAGRANGE MUL:\tRATIO = " << lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl; } } } // We check if converged const bool disp_converged = (disp_ratio <= mDispRatioTolerance || disp_abs <= mDispAbsTolerance); const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) ? true : (lm_ratio <= mLMRatioTolerance || lm_abs <= mLMAbsTolerance); if (disp_converged && lm_converged) { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FGRN(" Achieved")); else r_table << "Achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDoF convergence is achieved" << std::endl; } } return true; } else { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = 
r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FRED(" Not achieved")); else r_table << "Not achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDoF convergence is not achieved" << std::endl; } } return false; } } else // In this case all the displacements are imposed! return true; } /** * @brief This function initialize the convergence criteria * @param rModelPart Reference to the ModelPart containing the contact problem. (unused) */ void Initialize( ModelPart& rModelPart ) override { BaseType::mConvergenceCriteriaIsInitialized = true; ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table.AddColumn("DP RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); r_table.AddColumn("LM RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); r_table.AddColumn("CONVERGENCE", 15); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, true); } } /** * @brief This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. 
 * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
 * @param rA System matrix (unused)
 * @param rDx Vector of results (variations on nodal variables)
 * @param rb RHS vector (residual)
 */
void InitializeSolutionStep(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    const TSystemMatrixType& rA,
    const TSystemVectorType& rDx,
    const TSystemVectorType& rb
    ) override
{
    // Filling mActiveDofs when MPC exist
    ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet);
}

///@}
///@name Operations
///@{

///@}
///@name Access
///@{

///@}
///@name Inquiry
///@{

///@}
///@name Friends
///@{

protected:

///@name Protected static Member Variables
///@{

///@}
///@name Protected member Variables
///@{

///@}
///@name Protected Operators
///@{

///@}
///@name Protected Operations
///@{

///@}
///@name Protected Access
///@{

///@}
///@name Protected Inquiry
///@{

///@}
///@name Protected LifeCycle
///@{

///@}

private:

///@name Static Member Variables
///@{

///@}
///@name Member Variables
///@{

Flags mOptions; /// Local flags

TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement
TDataType mDispAbsTolerance;   /// The absolute value threshold for the norm of the displacement

TDataType mLMRatioTolerance; /// The ratio threshold for the norm of the LM
TDataType mLMAbsTolerance;   /// The absolute value threshold for the norm of the LM

std::vector<bool> mActiveDofs; /// This vector contains the dofs that are active

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

///@}
///@name Private Access
///@{

///@}

///@}
///@name Serialization
///@{

///@name Private Inquiry
///@{

///@}
///@name Unaccessible methods
///@{

///@}

}; // Kratos DisplacementLagrangeMultiplierContactCriteria

///@name Local flags creation
///@{

/// Local Flags
/// NOTE(review): this declaration intentionally continues on the next source
/// line (cut by the chunk boundary); do not terminate it here
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace,
TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
// Each local flag comes with its negated counterpart (Create(bit, false)) so
// callers can both set and explicitly clear the option.
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::NOT_ENSURE_CONTACT(Kratos::Flags::Create(0, false));

template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::NOT_PRINTING_OUTPUT(Kratos::Flags::Create(1, false));

template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::NOT_TABLE_IS_INITIALIZED(Kratos::Flags::Create(2, false));

}  // NOTE(review): presumably closes namespace Kratos (opened before this chunk) -- confirm
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H */
atomic_messages.c
// NOTE(review): clang '-verify' diagnostics test. Every 'expected-*' /
// 'omp45-*' / 'omp50-*' comment below is a machine-checked assertion, and the
// '@+N' suffixes are line-relative offsets to the diagnosed statement --
// comments and line breaks are semantically significant; do not reflow.
// RUN: %clang_cc1 -verify=expected,omp45 -fopenmp -fopenmp-version=45 -ferror-limit 100 %s -Wuninitialized
// RUN: %clang_cc1 -verify=expected,omp50 -fopenmp -fopenmp-version=50 -ferror-limit 100 %s -Wuninitialized
// RUN: %clang_cc1 -verify=expected,omp45 -fopenmp-simd -fopenmp-version=45 -ferror-limit 100 %s -Wuninitialized
// RUN: %clang_cc1 -verify=expected,omp50 -fopenmp-simd -fopenmp-version=50 -ferror-limit 100 %s -Wuninitialized

// -Wuninitialized still fires on the operand of an atomic read.
void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp atomic read
  argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}

// Compound statements (incl. ones entered/left via goto) are not valid
// atomic statements.
int foo() {
L1:
  foo();
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
  {
    foo();
    goto L1;
  }
  goto L2;
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
  {
    foo();
  L2:
    foo();
  }
  return 0;
}

struct S {
  int a;
};

// Malformed and duplicated forms of '#pragma omp atomic read'.
int readint() {
  int a = 0, b = 0;
  // Test for atomic read
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected an expression statement}}
  ;
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
  foo();
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
  a += b;
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected lvalue expression}}
  a = 0;
#pragma omp atomic read
  a = b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}}
#pragma omp atomic read read
  a = b;

  return 0;
}

// 'atomic read' requires operands of scalar type.
int readS() {
  struct S a, b;
  // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}} expected-error@+1 {{unexpected OpenMP clause 'allocate' in directive '#pragma omp atomic'}}
#pragma omp atomic read read allocate(a)
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected expression of scalar type}}
  a = b;

  return a.a;
}

// Malformed and duplicated forms of '#pragma omp atomic write'.
int writeint() {
  int a = 0, b = 0;
  // Test for atomic write
#pragma omp atomic write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
  ;
#pragma omp atomic write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
  foo();
#pragma omp atomic write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
  a += b;
#pragma omp atomic write
  a = 0;
#pragma omp atomic write
  a = b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}}
#pragma omp atomic write write
  a = b;

  return 0;
}

// 'atomic write' requires operands of scalar type.
int writeS() {
  struct S a, b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}}
#pragma omp atomic write write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected expression of scalar type}}
  a = b;

  return a.a;
}

// Accepted and rejected statement shapes for 'atomic [update]'.
int updateint() {
  int a = 0, b = 0;
  // Test for atomic update
#pragma omp atomic update
// expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
  ;
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected built-in binary or unary operator}}
  foo();
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected built-in binary operator}}
  a = b;
#pragma omp atomic update
// expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
  a = b || a;
#pragma omp atomic update
// expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
  a = a && b;
#pragma omp atomic update
// expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
  a = (float)a + b;
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
  a = 2 * b;
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
  a = b + *&a;
#pragma omp atomic update
  *&a = *&a + 2;
#pragma omp atomic update
  a++;
#pragma omp atomic
  ++a;
#pragma omp atomic update
  a--;
#pragma omp atomic
  --a;
#pragma omp atomic update
  a += b;
#pragma omp atomic
  a %= b;
#pragma omp atomic update
  a *= b;
#pragma omp atomic
  a -= b;
#pragma omp atomic update
  a /= b;
#pragma omp atomic
  a &= b;
#pragma omp atomic update
  a ^= b;
#pragma omp atomic
  a |= b;
#pragma omp atomic update
  a <<= b;
#pragma omp atomic
  a >>= b;
#pragma omp atomic update
  a = b + a;
#pragma omp atomic
  a = a * b;
#pragma omp atomic update
  a = b - a;
#pragma omp atomic
  a = a / b;
#pragma omp atomic update
  a = b & a;
#pragma omp atomic
  a = a ^ b;
#pragma omp atomic update
  a = b | a;
#pragma omp atomic
  a = a << b;
#pragma omp atomic
  a = b >> a;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'update' clause}}
#pragma omp atomic update update
  a /= b;

  return 0;
}

// Accepted and rejected statement shapes for 'atomic capture'
// (both expression-statement and two-statement compound forms).
int captureint() {
  int a = 0, b = 0, c = 0;
  // Test for atomic capture
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected compound statement}}
  ;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
  foo();
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected built-in binary or unary operator}}
  a = b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
  a = b || a;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
  b = a = a && b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
  a = (float)a + b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
  a = 2 * b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
  a = b + *&a;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected exactly two expression statements}}
  { a = b; }
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected exactly two expression statements}}
  {}
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of the first expression}}
  {a = b;a = b;}
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of the first expression}}
  {a = b; a = b || a;}
#pragma omp atomic capture
  {b = a; a = a && b;}
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
  b = a = (float)a + b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
  b = a = 2 * b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
  b = a = b + *&a;
#pragma omp atomic capture
  c = *&a = *&a + 2;
#pragma omp atomic capture
  c = a++;
#pragma omp atomic capture
  c = ++a;
#pragma omp atomic capture
  c = a--;
#pragma omp atomic capture
  c = --a;
#pragma omp atomic capture
  c = a += b;
#pragma omp atomic capture
  c = a %= b;
#pragma omp atomic capture
  c = a *= b;
#pragma omp atomic capture
  c = a -= b;
#pragma omp atomic capture
  c = a /= b;
#pragma omp atomic capture
  c = a &= b;
#pragma omp atomic capture
  c = a ^= b;
#pragma omp atomic capture
  c = a |= b;
#pragma omp atomic capture
  c = a <<= b;
#pragma omp atomic capture
  c = a >>= b;
#pragma omp atomic capture
  c = a = b + a;
#pragma omp atomic capture
  c = a = a * b;
#pragma omp atomic capture
  c = a = b - a;
#pragma omp atomic capture
  c = a = a / b;
#pragma omp atomic capture
  c = a = b & a;
#pragma omp atomic capture
  c = a = a ^ b;
#pragma omp atomic capture
  c = a = b | a;
#pragma omp atomic capture
  c = a = a << b;
#pragma omp atomic capture
  c = a = b >> a;
#pragma omp atomic capture
  { c = *&a; *&a = *&a + 2;}
#pragma omp atomic capture
  { *&a = *&a + 2; c = *&a;}
#pragma omp atomic capture
  {c = a; a++;}
#pragma omp atomic capture
  {c = a; (a)++;}
#pragma omp atomic capture
  {++a;c = a;}
#pragma omp atomic capture
  {c = a;a--;}
#pragma omp atomic capture
  {--a;c = a;}
#pragma omp atomic capture
  {c = a; a += b;}
#pragma omp atomic capture
  {c = a; (a) += b;}
#pragma omp atomic capture
  {a %= b; c = a;}
#pragma omp atomic capture
  {c = a; a *= b;}
#pragma omp atomic capture
  {a -= b;c = a;}
#pragma omp atomic capture
  {c = a; a /= b;}
#pragma omp atomic capture
  {a &= b; c = a;}
#pragma omp atomic capture
  {c = a; a ^= b;}
#pragma omp atomic capture
  {a |= b; c = a;}
#pragma omp atomic capture
  {c = a; a <<= b;}
#pragma omp atomic capture
  {a >>= b; c = a;}
#pragma omp atomic capture
  {c = a; a = b + a;}
#pragma omp atomic capture
  {a = a * b; c = a;}
#pragma omp atomic capture
  {c = a; a = b - a;}
#pragma omp atomic capture
  {a = a / b; c = a;}
#pragma omp atomic capture
  {c = a; a = b & a;}
#pragma omp atomic capture
  {a = a ^ b; c = a;}
#pragma omp atomic capture
  {c = a; a = b | a;}
#pragma omp atomic capture
  {a = a << b; c = a;}
#pragma omp atomic capture
  {c = a; a = b >> a;}
#pragma omp atomic capture
  {c = a; a = foo();}
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'capture' clause}}
#pragma omp atomic capture capture
  b = a /= b;

  return 0;
}

// 'hint' clause parsing: rejected entirely before OpenMP 5.0 (omp45-error),
// and its argument must be an integer constant expression in 5.0 (omp50-error).
void hint() {
  int a = 0;
#pragma omp atomic hint // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected '(' after 'hint'}}
  a += 1;
#pragma omp atomic hint( // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  a += 1;
#pragma omp atomic hint(+ // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  a += 1;
#pragma omp atomic hint(a // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{integer constant expression}}
  a += 1;
#pragma omp atomic hint(a) // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} omp50-error {{integer constant expression}}
  a += 1;
#pragma omp atomic hint(1) hint(1) // omp45-error 2 {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{directive '#pragma omp atomic' cannot contain more than one 'hint' clause}}
  a += 1;
}
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/DependenceFlags.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/BitmaskEnum.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class AddrLabelExpr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
/// class alignas(void *) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: friend class ASTStmtReader; friend class ASTStmtWriter; void *operator new(size_t bytes) noexcept { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) noexcept { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } //===--- Statement bitfields classes ---===// class StmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class Stmt; /// The statement class. unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class NullStmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class NullStmt; unsigned : NumStmtBits; /// True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode unsigned HasLeadingEmptyMacro : 1; /// The location of the semi-colon. SourceLocation SemiLoc; }; class CompoundStmtBitfields { friend class ASTStmtReader; friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; /// The location of the opening "{". SourceLocation LBraceLoc; }; class LabelStmtBitfields { friend class LabelStmt; unsigned : NumStmtBits; SourceLocation IdentLoc; }; class AttributedStmtBitfields { friend class ASTStmtReader; friend class AttributedStmt; unsigned : NumStmtBits; /// Number of attributes. unsigned NumAttrs : 32 - NumStmtBits; /// The location of the attribute. 
SourceLocation AttrLoc;
};

class IfStmtBitfields {
  friend class ASTStmtReader;
  friend class IfStmt;

  unsigned : NumStmtBits;

  /// True if this if statement is a constexpr if.
  unsigned IsConstexpr : 1;

  /// True if this if statement has storage for an else statement.
  unsigned HasElse : 1;

  /// True if this if statement has storage for a variable declaration.
  unsigned HasVar : 1;

  /// True if this if statement has storage for an init statement.
  unsigned HasInit : 1;

  /// The location of the "if".
  SourceLocation IfLoc;
};

class SwitchStmtBitfields {
  friend class SwitchStmt;

  unsigned : NumStmtBits;

  /// True if the SwitchStmt has storage for an init statement.
  unsigned HasInit : 1;

  /// True if the SwitchStmt has storage for a condition variable.
  unsigned HasVar : 1;

  /// If the SwitchStmt is a switch on an enum value, records whether all
  /// the enum values were covered by CaseStmts.  The coverage information
  /// value is meant to be a hint for possible clients.
  unsigned AllEnumCasesCovered : 1;

  /// The location of the "switch".
  SourceLocation SwitchLoc;
};

class WhileStmtBitfields {
  friend class ASTStmtReader;
  friend class WhileStmt;

  unsigned : NumStmtBits;

  /// True if the WhileStmt has storage for a condition variable.
  unsigned HasVar : 1;

  /// The location of the "while".
  SourceLocation WhileLoc;
};

class DoStmtBitfields {
  friend class DoStmt;

  unsigned : NumStmtBits;

  /// The location of the "do".
  SourceLocation DoLoc;
};

class ForStmtBitfields {
  friend class ForStmt;

  unsigned : NumStmtBits;

  /// The location of the "for".
  SourceLocation ForLoc;
};

class GotoStmtBitfields {
  friend class GotoStmt;
  friend class IndirectGotoStmt;

  unsigned : NumStmtBits;

  /// The location of the "goto".
  SourceLocation GotoLoc;
};

class ContinueStmtBitfields {
  friend class ContinueStmt;

  unsigned : NumStmtBits;

  /// The location of the "continue".
  SourceLocation ContinueLoc;
};

class BreakStmtBitfields {
  friend class BreakStmt;

  unsigned : NumStmtBits;

  /// The location of the "break".
  SourceLocation BreakLoc;
};

class ReturnStmtBitfields {
  friend class ReturnStmt;

  unsigned : NumStmtBits;

  /// True if this ReturnStmt has storage for an NRVO candidate.
  unsigned HasNRVOCandidate : 1;

  /// The location of the "return".
  SourceLocation RetLoc;
};

class SwitchCaseBitfields {
  friend class SwitchCase;
  friend class CaseStmt;

  unsigned : NumStmtBits;

  /// Used by CaseStmt to store whether it is a case statement
  /// of the form case LHS ... RHS (a GNU extension).
  unsigned CaseStmtIsGNURange : 1;

  /// The location of the "case" or "default" keyword.
  SourceLocation KeywordLoc;
};

//===--- Expression bitfields classes ---===//

class ExprBitfields {
  friend class ASTStmtReader; // deserialization
  friend class AtomicExpr; // ctor
  friend class BlockDeclRefExpr; // ctor
  friend class CallExpr; // ctor
  friend class CXXConstructExpr; // ctor
  friend class CXXDependentScopeMemberExpr; // ctor
  friend class CXXNewExpr; // ctor
  friend class CXXUnresolvedConstructExpr; // ctor
  friend class DeclRefExpr; // computeDependence
  friend class DependentScopeDeclRefExpr; // ctor
  friend class DesignatedInitExpr; // ctor
  friend class Expr;
  friend class InitListExpr; // ctor
  friend class ObjCArrayLiteral; // ctor
  friend class ObjCDictionaryLiteral; // ctor
  friend class ObjCMessageExpr; // ctor
  friend class OffsetOfExpr; // ctor
  friend class OpaqueValueExpr; // ctor
  friend class OverloadExpr; // ctor
  friend class ParenListExpr; // ctor
  friend class PseudoObjectExpr; // ctor
  friend class ShuffleVectorExpr; // ctor

  unsigned : NumStmtBits;

  // The value category of this expression (presumably an ExprValueKind;
  // confirm against Expr's accessors).
  unsigned ValueKind : 2;
  // The object kind of this expression (presumably an ExprObjectKind;
  // confirm against Expr's accessors).
  unsigned ObjectKind : 3;
  unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>;
};

// NumExprBits = NumStmtBits + 2 (ValueKind) + 3 (ObjectKind) + the
// dependence bits above.
enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> };

class ConstantExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class ConstantExpr;

  unsigned : NumExprBits;

  /// The kind of result that is tail-allocated.
  unsigned ResultKind : 2;

  /// The kind of Result as defined by APValue::Kind.
  unsigned APValueKind : 4;

  /// When ResultKind == RSK_Int64, true if the tail-allocated integer is
  /// unsigned.
  unsigned IsUnsigned : 1;

  /// When ResultKind == RSK_Int64, the BitWidth of the tail-allocated
  /// integer. 7 bits because it is the minimal number of bits to represent a
  /// value from 0 to 64 (the size of the tail-allocated integer).
  unsigned BitWidth : 7;

  /// When ResultKind == RSK_APValue, true if the ASTContext will cleanup the
  /// tail-allocated APValue.
  unsigned HasCleanup : 1;

  /// True if this ConstantExpr was created for immediate invocation.
  unsigned IsImmediateInvocation : 1;
};

class PredefinedExprBitfields {
  friend class ASTStmtReader;
  friend class PredefinedExpr;

  unsigned : NumExprBits;

  /// The kind of this PredefinedExpr. One of the enumeration values
  /// in PredefinedExpr::IdentKind.
  unsigned Kind : 4;

  /// True if this PredefinedExpr has a trailing "StringLiteral *"
  /// for the predefined identifier.
  unsigned HasFunctionName : 1;

  /// The location of this PredefinedExpr.
  SourceLocation Loc;
};

class DeclRefExprBitfields {
  friend class ASTStmtReader; // deserialization
  friend class DeclRefExpr;

  unsigned : NumExprBits;

  unsigned HasQualifier : 1;
  unsigned HasTemplateKWAndArgsInfo : 1;
  unsigned HasFoundDecl : 1;
  unsigned HadMultipleCandidates : 1;
  unsigned RefersToEnclosingVariableOrCapture : 1;
  // Presumably a value of type NonOdrUseReason, as in MemberExprBitfields;
  // confirm.
  unsigned NonOdrUseReason : 2;

  /// The location of the declaration name itself.
  SourceLocation Loc;
};

class FloatingLiteralBitfields {
  friend class FloatingLiteral;

  unsigned : NumExprBits;

  unsigned Semantics : 3; // Provides semantics for APFloat construction
  // NOTE(review): presumably tracks whether the literal was represented
  // exactly by the APFloat semantics — confirm against FloatingLiteral.
  unsigned IsExact : 1;
};

class StringLiteralBitfields {
  friend class ASTStmtReader;
  friend class StringLiteral;

  unsigned : NumExprBits;

  /// The kind of this string literal.
  /// One of the enumeration values of StringLiteral::StringKind.
  unsigned Kind : 3;

  /// The width of a single character in bytes. Only values of 1, 2,
  /// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
  /// the target + string kind to the appropriate CharByteWidth.
  unsigned CharByteWidth : 3;

  unsigned IsPascal : 1;

  /// The number of concatenated token this string is made of.
  /// This is the number of trailing SourceLocation.
  unsigned NumConcatenated;
};

class CharacterLiteralBitfields {
  friend class CharacterLiteral;

  unsigned : NumExprBits;

  // The kind of this character literal; presumably one of the
  // CharacterLiteral character-kind enumerators — confirm.
  unsigned Kind : 3;
};

class UnaryOperatorBitfields {
  friend class UnaryOperator;

  unsigned : NumExprBits;

  unsigned Opc : 5;
  unsigned CanOverflow : 1;

  /// This is only meaningful for operations on floating point
  /// types when additional values need to be in trailing storage.
  /// It is 0 otherwise.
  unsigned HasFPFeatures : 1;

  SourceLocation Loc;
};

class UnaryExprOrTypeTraitExprBitfields {
  friend class UnaryExprOrTypeTraitExpr;

  unsigned : NumExprBits;

  // The trait kind (e.g. sizeof/alignof); presumably a value of the
  // UnaryExprOrTypeTrait enumeration — confirm.
  unsigned Kind : 3;
  unsigned IsType : 1; // true if operand is a type, false if an expression.
};

class ArrayOrMatrixSubscriptExprBitfields {
  friend class ArraySubscriptExpr;
  friend class MatrixSubscriptExpr;

  unsigned : NumExprBits;

  SourceLocation RBracketLoc;
};

class CallExprBitfields {
  friend class CallExpr;

  unsigned : NumExprBits;

  // At most one "pre-argument" (given the 1-bit width) stored before the
  // ordinary call arguments.
  unsigned NumPreArgs : 1;

  /// True if the callee of the call expression was found using ADL.
  unsigned UsesADL : 1;

  /// True if the call expression has some floating-point features.
  unsigned HasFPFeatures : 1;

  /// Padding used to align OffsetToTrailingObjects to a byte multiple.
  unsigned : 24 - 3 - NumExprBits;

  /// The offset in bytes from the this pointer to the start of the
  /// trailing objects belonging to CallExpr. Intentionally byte sized
  /// for faster access.
  unsigned OffsetToTrailingObjects : 8;
};
enum { NumCallExprBits = 32 };

class MemberExprBitfields {
  friend class ASTStmtReader;
  friend class MemberExpr;

  unsigned : NumExprBits;

  /// IsArrow - True if this is "X->F", false if this is "X.F".
  unsigned IsArrow : 1;

  /// True if this member expression used a nested-name-specifier to
  /// refer to the member, e.g., "x->Base::f", or found its member via
  /// a using declaration.  When true, a MemberExprNameQualifier
  /// structure is allocated immediately after the MemberExpr.
  unsigned HasQualifierOrFoundDecl : 1;

  /// True if this member expression specified a template keyword
  /// and/or a template argument list explicitly, e.g., x->f<int>,
  /// x->template f, x->template f<int>.
  /// When true, an ASTTemplateKWAndArgsInfo structure and its
  /// TemplateArguments (if any) are present.
  unsigned HasTemplateKWAndArgsInfo : 1;

  /// True if this member expression refers to a method that
  /// was resolved from an overloaded set having size greater than 1.
  unsigned HadMultipleCandidates : 1;

  /// Value of type NonOdrUseReason indicating why this MemberExpr does
  /// not constitute an odr-use of the named declaration. Meaningful only
  /// when naming a static member.
  unsigned NonOdrUseReason : 2;

  /// This is the location of the -> or . in the expression.
  SourceLocation OperatorLoc;
};

class CastExprBitfields {
  friend class CastExpr;
  friend class ImplicitCastExpr;

  unsigned : NumExprBits;

  unsigned Kind : 6;
  unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.

  /// True if the cast expression has some floating-point features.
  unsigned HasFPFeatures : 1;

  /// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
  /// here. ([implimits] Direct and indirect base classes [16384]).
  unsigned BasePathSize;
};

class BinaryOperatorBitfields {
  friend class BinaryOperator;

  unsigned : NumExprBits;

  unsigned Opc : 6;

  /// This is only meaningful for operations on floating point
  /// types when additional values need to be in trailing storage.
  /// It is 0 otherwise.
  unsigned HasFPFeatures : 1;

  SourceLocation OpLoc;
};

class InitListExprBitfields {
  friend class InitListExpr;

  unsigned : NumExprBits;

  /// Whether this initializer list originally had a GNU array-range
  /// designator in it. This is a temporary marker used by CodeGen.
  unsigned HadArrayRangeDesignator : 1;
};

class ParenListExprBitfields {
  friend class ASTStmtReader;
  friend class ParenListExpr;

  unsigned : NumExprBits;

  /// The number of expressions in the paren list.
  unsigned NumExprs;
};

class GenericSelectionExprBitfields {
  friend class ASTStmtReader;
  friend class GenericSelectionExpr;

  unsigned : NumExprBits;

  /// The location of the "_Generic".
  SourceLocation GenericLoc;
};

class PseudoObjectExprBitfields {
  friend class ASTStmtReader; // deserialization
  friend class PseudoObjectExpr;

  unsigned : NumExprBits;

  // These don't need to be particularly wide, because they're
  // strictly limited by the forms of expressions we permit.
  unsigned NumSubExprs : 8;
  unsigned ResultIndex : 32 - 8 - NumExprBits;
};

class SourceLocExprBitfields {
  friend class ASTStmtReader;
  friend class SourceLocExpr;

  unsigned : NumExprBits;

  /// The kind of source location builtin represented by the SourceLocExpr.
  /// Ex. __builtin_LINE, __builtin_FUNCTION, etc.
  unsigned Kind : 2;
};

class StmtExprBitfields {
  friend class ASTStmtReader;
  friend class StmtExpr;

  unsigned : NumExprBits;

  /// The number of levels of template parameters enclosing this statement
  /// expression. Used to determine if a statement expression remains
  /// dependent after instantiation.
  unsigned TemplateDepth;
};

//===--- C++ Expression bitfields classes ---===//

class CXXOperatorCallExprBitfields {
  friend class ASTStmtReader;
  friend class CXXOperatorCallExpr;

  unsigned : NumCallExprBits;

  /// The kind of this overloaded operator. One of the enumerator
  /// value of OverloadedOperatorKind.
  unsigned OperatorKind : 6;
};

class CXXRewrittenBinaryOperatorBitfields {
  friend class ASTStmtReader;
  friend class CXXRewrittenBinaryOperator;

  unsigned : NumCallExprBits;

  unsigned IsReversed : 1;
};

class CXXBoolLiteralExprBitfields {
  friend class CXXBoolLiteralExpr;

  unsigned : NumExprBits;

  /// The value of the boolean literal.
  unsigned Value : 1;

  /// The location of the boolean literal.
  SourceLocation Loc;
};

class CXXNullPtrLiteralExprBitfields {
  friend class CXXNullPtrLiteralExpr;

  unsigned : NumExprBits;

  /// The location of the null pointer literal.
  SourceLocation Loc;
};

class CXXThisExprBitfields {
  friend class CXXThisExpr;

  unsigned : NumExprBits;

  /// Whether this is an implicit "this".
  unsigned IsImplicit : 1;

  /// The location of the "this".
  SourceLocation Loc;
};

class CXXThrowExprBitfields {
  friend class ASTStmtReader;
  friend class CXXThrowExpr;

  unsigned : NumExprBits;

  /// Whether the thrown variable (if any) is in scope.
  unsigned IsThrownVariableInScope : 1;

  /// The location of the "throw".
  SourceLocation ThrowLoc;
};

class CXXDefaultArgExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDefaultArgExpr;

  unsigned : NumExprBits;

  /// The location where the default argument expression was used.
  SourceLocation Loc;
};

class CXXDefaultInitExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDefaultInitExpr;

  unsigned : NumExprBits;

  /// The location where the default initializer expression was used.
  SourceLocation Loc;
};

class CXXScalarValueInitExprBitfields {
  friend class ASTStmtReader;
  friend class CXXScalarValueInitExpr;

  unsigned : NumExprBits;

  SourceLocation RParenLoc;
};

class CXXNewExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class CXXNewExpr;

  unsigned : NumExprBits;

  /// Was the usage ::new, i.e. is the global new to be used?
  unsigned IsGlobalNew : 1;

  /// Do we allocate an array? If so, the first trailing "Stmt *" is the
  /// size expression.
  unsigned IsArray : 1;

  /// Should the alignment be passed to the allocation function?
  unsigned ShouldPassAlignment : 1;

  /// If this is an array allocation, does the usual deallocation
  /// function for the allocated type want to know the allocated size?
  unsigned UsualArrayDeleteWantsSize : 1;

  /// What kind of initializer do we have? Could be none, parens, or braces.
  /// In storage, we distinguish between "none, and no initializer expr", and
  /// "none, but an implicit initializer expr".
  unsigned StoredInitializationStyle : 2;

  /// True if the allocated type was expressed as a parenthesized type-id.
  unsigned IsParenTypeId : 1;

  /// The number of placement new arguments.
  unsigned NumPlacementArgs;
};

class CXXDeleteExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDeleteExpr;

  unsigned : NumExprBits;

  /// Is this a forced global delete, i.e. "::delete"?
  unsigned GlobalDelete : 1;

  /// Is this the array form of delete, i.e. "delete[]"?
  unsigned ArrayForm : 1;

  /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
  /// applied to pointer-to-array type (ArrayFormAsWritten will be false
  /// while ArrayForm will be true).
  unsigned ArrayFormAsWritten : 1;

  /// Does the usual deallocation function for the element type require
  /// a size_t argument?
  unsigned UsualArrayDeleteWantsSize : 1;

  /// Location of the expression.
  SourceLocation Loc;
};

class TypeTraitExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class TypeTraitExpr;

  unsigned : NumExprBits;

  /// The kind of type trait, which is a value of a TypeTrait enumerator.
  unsigned Kind : 8;

  /// If this expression is not value-dependent, this indicates whether
  /// the trait evaluated true or false.
  unsigned Value : 1;

  /// The number of arguments to this type trait. According to [implimits]
  /// 8 bits would be enough, but we require (and test for) at least 16 bits
  /// to mirror FunctionType.
  unsigned NumArgs;
};

class DependentScopeDeclRefExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class DependentScopeDeclRefExpr;

  unsigned : NumExprBits;

  /// Whether the name includes info for explicit template
  /// keyword and arguments.
  unsigned HasTemplateKWAndArgsInfo : 1;
};

class CXXConstructExprBitfields {
  friend class ASTStmtReader;
  friend class CXXConstructExpr;

  unsigned : NumExprBits;

  // Construction flags; see CXXConstructExpr's accessors for the exact
  // semantics of each.
  unsigned Elidable : 1;
  unsigned HadMultipleCandidates : 1;
  unsigned ListInitialization : 1;
  unsigned StdInitListInitialization : 1;
  unsigned ZeroInitialization : 1;
  unsigned ConstructionKind : 3;

  SourceLocation Loc;
};

class ExprWithCleanupsBitfields {
  friend class ASTStmtReader; // deserialization
  friend class ExprWithCleanups;

  unsigned : NumExprBits;

  // When false, it must not have side effects.
  unsigned CleanupsHaveSideEffects : 1;

  unsigned NumObjects : 32 - 1 - NumExprBits;
};

class CXXUnresolvedConstructExprBitfields {
  friend class ASTStmtReader;
  friend class CXXUnresolvedConstructExpr;

  unsigned : NumExprBits;

  /// The number of arguments used to construct the type.
  unsigned NumArgs;
};

class CXXDependentScopeMemberExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDependentScopeMemberExpr;

  unsigned : NumExprBits;

  /// Whether this member expression used the '->' operator or
  /// the '.' operator.
  unsigned IsArrow : 1;

  /// Whether this member expression has info for explicit template
  /// keyword and arguments.
  unsigned HasTemplateKWAndArgsInfo : 1;

  /// See getFirstQualifierFoundInScope() and the comment listing
  /// the trailing objects.
  unsigned HasFirstQualifierFoundInScope : 1;

  /// The location of the '->' or '.' operator.
  SourceLocation OperatorLoc;
};

class OverloadExprBitfields {
  friend class ASTStmtReader;
  friend class OverloadExpr;

  unsigned : NumExprBits;

  /// Whether the name includes info for explicit template
  /// keyword and arguments.
  unsigned HasTemplateKWAndArgsInfo : 1;

  /// Padding used by the derived classes to store various bits. If you
  /// need to add some data here, shrink this padding and add your data
  /// above. NumOverloadExprBits also needs to be updated.
  unsigned : 32 - NumExprBits - 1;

  /// The number of results.
  unsigned NumResults;
};
enum { NumOverloadExprBits = NumExprBits + 1 };

class UnresolvedLookupExprBitfields {
  friend class ASTStmtReader;
  friend class UnresolvedLookupExpr;

  unsigned : NumOverloadExprBits;

  /// True if these lookup results should be extended by
  /// argument-dependent lookup if this is the operand of a function call.
  unsigned RequiresADL : 1;

  /// True if these lookup results are overloaded. This is pretty trivially
  /// rederivable if we urgently need to kill this field.
  unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
              "UnresolvedLookupExprBitfields must be <= than 4 bytes to"
              "avoid trashing OverloadExprBitfields::NumResults!");

class UnresolvedMemberExprBitfields {
  friend class ASTStmtReader;
  friend class UnresolvedMemberExpr;

  unsigned : NumOverloadExprBits;

  /// Whether this member expression used the '->' operator or
  /// the '.' operator.
  unsigned IsArrow : 1;

  /// Whether the lookup results contain an unresolved using declaration.
  unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
              "UnresolvedMemberExprBitfields must be <= than 4 bytes to"
              "avoid trashing OverloadExprBitfields::NumResults!");

class CXXNoexceptExprBitfields {
  friend class ASTStmtReader;
  friend class CXXNoexceptExpr;

  unsigned : NumExprBits;

  unsigned Value : 1;
};

class SubstNonTypeTemplateParmExprBitfields {
  friend class ASTStmtReader;
  friend class SubstNonTypeTemplateParmExpr;

  unsigned : NumExprBits;

  /// The location of the non-type template parameter reference.
  SourceLocation NameLoc;
};

class LambdaExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class LambdaExpr;

  unsigned : NumExprBits;

  /// The default capture kind, which is a value of type
  /// LambdaCaptureDefault.
  unsigned CaptureDefault : 2;

  /// Whether this lambda had an explicit parameter list vs. an
  /// implicit (and empty) parameter list.
  unsigned ExplicitParams : 1;

  /// Whether this lambda had the result type explicitly specified.
  unsigned ExplicitResultType : 1;

  /// The number of captures.
  unsigned NumCaptures : 16;
};

class RequiresExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class RequiresExpr;

  unsigned : NumExprBits;

  unsigned IsSatisfied : 1;

  SourceLocation RequiresKWLoc;
};

//===--- C++ Coroutines TS bitfields classes ---===//

class CoawaitExprBitfields {
  friend class CoawaitExpr;

  unsigned : NumExprBits;

  unsigned IsImplicit : 1;
};

//===--- Obj-C Expression bitfields classes ---===//

class ObjCIndirectCopyRestoreExprBitfields {
  friend class ObjCIndirectCopyRestoreExpr;

  unsigned : NumExprBits;

  unsigned ShouldCopy : 1;
};

//===--- Clang Extensions bitfields classes ---===//

class OpaqueValueExprBitfields {
  friend class ASTStmtReader;
  friend class OpaqueValueExpr;

  unsigned : NumExprBits;

  /// The OVE is a unique semantic reference to its source expression if this
  /// bit is set to true.
  unsigned IsUnique : 1;

  SourceLocation Loc;
};

union {
  // Same order as in StmtNodes.td.

  // Statements
  StmtBitfields StmtBits;
  NullStmtBitfields NullStmtBits;
  CompoundStmtBitfields CompoundStmtBits;
  LabelStmtBitfields LabelStmtBits;
  AttributedStmtBitfields AttributedStmtBits;
  IfStmtBitfields IfStmtBits;
  SwitchStmtBitfields SwitchStmtBits;
  WhileStmtBitfields WhileStmtBits;
  DoStmtBitfields DoStmtBits;
  ForStmtBitfields ForStmtBits;
  GotoStmtBitfields GotoStmtBits;
  ContinueStmtBitfields ContinueStmtBits;
  BreakStmtBitfields BreakStmtBits;
  ReturnStmtBitfields ReturnStmtBits;
  SwitchCaseBitfields SwitchCaseBits;

  // Expressions
  ExprBitfields ExprBits;
  ConstantExprBitfields ConstantExprBits;
  PredefinedExprBitfields PredefinedExprBits;
  DeclRefExprBitfields DeclRefExprBits;
  FloatingLiteralBitfields FloatingLiteralBits;
  StringLiteralBitfields StringLiteralBits;
  CharacterLiteralBitfields CharacterLiteralBits;
  UnaryOperatorBitfields UnaryOperatorBits;
  UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
  ArrayOrMatrixSubscriptExprBitfields ArrayOrMatrixSubscriptExprBits;
  CallExprBitfields CallExprBits;
  MemberExprBitfields MemberExprBits;
  CastExprBitfields CastExprBits;
  BinaryOperatorBitfields BinaryOperatorBits;
  InitListExprBitfields InitListExprBits;
  ParenListExprBitfields ParenListExprBits;
  GenericSelectionExprBitfields GenericSelectionExprBits;
  PseudoObjectExprBitfields PseudoObjectExprBits;
  SourceLocExprBitfields SourceLocExprBits;

  // GNU Extensions.
  StmtExprBitfields StmtExprBits;

  // C++ Expressions
  CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
  CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits;
  CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
  CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
  CXXThisExprBitfields CXXThisExprBits;
  CXXThrowExprBitfields CXXThrowExprBits;
  CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
  CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
  CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
  CXXNewExprBitfields CXXNewExprBits;
  CXXDeleteExprBitfields CXXDeleteExprBits;
  TypeTraitExprBitfields TypeTraitExprBits;
  DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
  CXXConstructExprBitfields CXXConstructExprBits;
  ExprWithCleanupsBitfields ExprWithCleanupsBits;
  CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
  CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
  OverloadExprBitfields OverloadExprBits;
  UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
  UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
  CXXNoexceptExprBitfields CXXNoexceptExprBits;
  SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
  LambdaExprBitfields LambdaExprBits;
  RequiresExprBitfields RequiresExprBits;

  // C++ Coroutines TS expressions
  CoawaitExprBitfields CoawaitBits;

  // Obj-C Expressions
  ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;

  // Clang Extensions
  OpaqueValueExprBitfields OpaqueValueExprBits;
};

public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C,
                   unsigned alignment = 8);

void* operator new(size_t bytes, const ASTContext* C,
                   unsigned alignment = 8) {
  return operator new(bytes, *C, alignment);
}

// Placement new: caller supplies the storage.
void *operator new(size_t bytes, void *mem) noexcept { return mem; }

// Matching placement deletes. These are only invoked by the compiler if a
// constructor throws during the corresponding placement new; they must not
// free anything because the storage is owned elsewhere (ASTContext or the
// caller).
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}

public:
/// A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};

/// The likelihood of a branch being taken.
enum Likelihood {
  LH_Unlikely = -1, ///< Branch has the [[unlikely]] attribute.
  LH_None,          ///< No attribute set or branches of the IfStmt have
                    ///< the same attribute.
  LH_Likely         ///< Branch has the [[likely]] attribute.
};

protected:
/// Iterator for iterating over Stmt * arrays that contain only T *.
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
struct CastIterator
    : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
                                  std::random_access_iterator_tag, TPtr> {
  using Base = typename CastIterator::iterator_adaptor_base;

  CastIterator() : Base(nullptr) {}
  CastIterator(StmtPtr *I) : Base(I) {}

  typename Base::value_type operator*() const {
    return cast_or_null<T>(*this->I);
  }
};

/// Const iterator for iterating over Stmt * arrays that contain only T *.
template <typename T>
using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;

using ExprIterator = CastIterator<Expr>;
using ConstExprIterator = ConstCastIterator<Expr>;

private:
/// Whether statistic collection is enabled.
static bool StatisticsEnabled;

protected:
/// Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
Stmt() = delete;
Stmt(const Stmt &) = delete;
Stmt(Stmt &&) = delete;
Stmt &operator=(const Stmt &) = delete;
Stmt &operator=(Stmt &&) = delete;

Stmt(StmtClass SC) {
  // Stmt must stay one pointer-sized word of bitfields: subclasses rely on
  // this exact size/alignment for their tail-allocated storage.
  static_assert(sizeof(*this) <= 8,
                "changing bitfields changed sizeof(Stmt)");
  static_assert(sizeof(*this) % alignof(void *) == 0,
                "Insufficient alignment!");
  StmtBits.sClass = SC;
  if (StatisticsEnabled) Stmt::addStmtClass(SC);
}

StmtClass getStmtClass() const {
  return static_cast<StmtClass>(StmtBits.sClass);
}

const char *getStmtClassName() const;

/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;

// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();

/// \returns the likelihood of a set of attributes.
static Likelihood getLikelihood(ArrayRef<const Attr *> Attrs);

/// \returns the likelihood of a statement.
static Likelihood getLikelihood(const Stmt *S);

/// \returns the likelihood of the 'then' branch of an 'if' statement. The
/// 'else' branch is required to determine whether both branches specify the
/// same likelihood, which affects the result.
static Likelihood getLikelihood(const Stmt *Then, const Stmt *Else);

/// \returns whether the likelihood of the branches of an if statement are
/// conflicting. When the first element is \c true there's a conflict and
/// the Attr's are the conflicting attributes of the Then and Else Stmt.
static std::tuple<bool, const Attr *, const Attr *>
determineLikelihoodConflict(const Stmt *Then, const Stmt *Else);

/// Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(raw_ostream &OS, const ASTContext &Context) const;

/// \return Unique reproducible object identifier
int64_t getID(const ASTContext &Context) const;

/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;

/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                 const PrintingPolicy &Policy, unsigned Indentation = 0,
                 StringRef NewlineSymbol = "\n",
                 const ASTContext *Context = nullptr) const;

/// Pretty-prints in JSON format.
void printJson(raw_ostream &Out, PrinterHelper *Helper,
               const PrintingPolicy &Policy, bool AddQuotes) const;

/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz.  Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;

/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
  return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}

const Stmt *stripLabelLikeStatements() const;
Stmt *stripLabelLikeStatements() {
  return const_cast<Stmt*>(
      const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}

/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpressions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;

using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;

child_range children();

const_child_range children() const {
  auto Children = const_cast<Stmt *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }

const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }

/// Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
             bool Canonical) const;

/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
/// have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt { DeclGroupRef DG; SourceLocation StartLoc, EndLoc; public: DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc) : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {} /// Build an empty declaration statement. explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {} /// isSingleDecl - This method returns true if this DeclStmt refers /// to a single Decl. bool isSingleDecl() const { return DG.isSingleDecl(); } const Decl *getSingleDecl() const { return DG.getSingleDecl(); } Decl *getSingleDecl() { return DG.getSingleDecl(); } const DeclGroupRef getDeclGroup() const { return DG; } DeclGroupRef getDeclGroup() { return DG; } void setDeclGroup(DeclGroupRef DGR) { DG = DGR; } void setStartLoc(SourceLocation L) { StartLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DeclStmtClass; } // Iterators over subexpressions. 
child_range children() { return child_range(child_iterator(DG.begin(), DG.end()), child_iterator(DG.end(), DG.end())); } const_child_range children() const { auto Children = const_cast<DeclStmt *>(this)->children(); return const_child_range(Children); } using decl_iterator = DeclGroupRef::iterator; using const_decl_iterator = DeclGroupRef::const_iterator; using decl_range = llvm::iterator_range<decl_iterator>; using decl_const_range = llvm::iterator_range<const_decl_iterator>; decl_range decls() { return decl_range(decl_begin(), decl_end()); } decl_const_range decls() const { return decl_const_range(decl_begin(), decl_end()); } decl_iterator decl_begin() { return DG.begin(); } decl_iterator decl_end() { return DG.end(); } const_decl_iterator decl_begin() const { return DG.begin(); } const_decl_iterator decl_end() const { return DG.end(); } using reverse_decl_iterator = std::reverse_iterator<decl_iterator>; reverse_decl_iterator decl_rbegin() { return reverse_decl_iterator(decl_end()); } reverse_decl_iterator decl_rend() { return reverse_decl_iterator(decl_begin()); } }; /// NullStmt - This is the null statement ";": C99 6.8.3p3. /// class NullStmt : public Stmt { public: NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass) { NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro; setSemiLoc(L); } /// Build an empty null statement. 
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {} SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; } void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; } bool hasLeadingEmptyMacro() const { return NullStmtBits.HasLeadingEmptyMacro; } SourceLocation getBeginLoc() const { return getSemiLoc(); } SourceLocation getEndLoc() const { return getSemiLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == NullStmtClass; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// CompoundStmt - This represents a group of statements like { stmt stmt }. class CompoundStmt final : public Stmt, private llvm::TrailingObjects<CompoundStmt, Stmt *> { friend class ASTStmtReader; friend TrailingObjects; /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits. SourceLocation RBraceLoc; CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB); explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {} void setStmts(ArrayRef<Stmt *> Stmts); public: static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB); // Build an empty compound statement with a location. explicit CompoundStmt(SourceLocation Loc) : Stmt(CompoundStmtClass), RBraceLoc(Loc) { CompoundStmtBits.NumStmts = 0; CompoundStmtBits.LBraceLoc = Loc; } // Build an empty compound statement. 
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  // The statements of the body are stored as trailing objects.
  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  // Get the Stmt that StmtExpr would consider to be the result of this
  // compound statement. This is used by StmtExpr to properly emulate the GCC
  // compound expression extension, which ignores trailing NullStmts when
  // getting the result of the expression.
  // i.e. ({ 5;;; })
  //         ^^ ignored
  // If we don't find something that isn't a NullStmt, just return the last
  // Stmt.
  Stmt *getStmtExprResult() {
    // Scan backwards for the last statement that is not a NullStmt.
    for (auto *B : llvm::reverse(body())) {
      if (!isa<NullStmt>(B))
        return B;
    }
    return body_back();
  }

  const Stmt *getStmtExprResult() const {
    return const_cast<CompoundStmt *>(this)->getStmtExprResult();
  }

  SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};

// SwitchCase is the base class for CaseStmt and DefaultStmt.
class SwitchCase : public Stmt {
protected:
  /// The location of the ":".
  SourceLocation ColonLoc;

  // The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
  // SourceLocation KeywordLoc;

  /// A pointer to the following CaseStmt or DefaultStmt class,
  /// used by SwitchStmt.
  SwitchCase *NextSwitchCase = nullptr;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), ColonLoc(ColonLoc) {
    setKeywordLoc(KWLoc);
  }

  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  // The intrusive singly-linked list of cases is maintained by SwitchStmt.
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  // Defined out of line below, once CaseStmt and DefaultStmt are complete.
  inline Stmt *getSubStmt();

  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase *>(this)->getSubStmt();
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  inline SourceLocation getEndLoc() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};

/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing objects
  // at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allows ranges in case statements of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case statement
  //   with a range. Present if and only if caseStmtIsGNURange() is true.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  // Offsets into the trailing Stmt* array; the RHS slot shifts the
  // substatement slot when present.
  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);

  /// Build an empty case statement.
  static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);

  /// True if this case statement is of the form case LHS ... RHS, which
  /// is a GNU extension. In this case the RHS can be obtained with getRHS()
  /// and the location of the ellipsis can be obtained with getEllipsisLoc().
  bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }

  SourceLocation getCaseLoc() const { return getKeywordLoc(); }
  void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }

  /// Get the location of the ... in a case statement of the form LHS ... RHS.
  SourceLocation getEllipsisLoc() const {
    return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
                                : SourceLocation();
  }

  /// Set the location of the ... in a case statement of the form LHS ... RHS.
  /// Assert that this case statement is of this form.
  void setEllipsisLoc(SourceLocation L) {
    assert(
        caseStmtIsGNURange() &&
        "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
    *getTrailingObjects<SourceLocation>() = L;
  }

  Expr *getLHS() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  const Expr *getLHS() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  void setLHS(Expr *Val) {
    getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  // The RHS slot only exists for GNU range cases; otherwise return null.
  Expr *getRHS() {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  const Expr *getRHS() const {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  void setRHS(Expr *Val) {
    assert(caseStmtIsGNURange() &&
           "setRHS but this is not a case stmt of the form LHS ... RHS!");
    getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
  const Stmt *getSubStmt() const {
    return getTrailingObjects<Stmt *>()[subStmtOffset()];
  }

  void setSubStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of
    // recursion.
    // Walk down chained "case a: case b: ..." substatements iteratively.
    const CaseStmt *CS = this;
    while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

/// DefaultStmt - Represents the "default:" label of a switch statement.
class DefaultStmt : public SwitchCase {
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }
};

// Out-of-line definitions of the SwitchCase accessors declared "inline"
// above: they need CaseStmt and DefaultStmt to be complete types.
SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getEndLoc();
  else if (const auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

Stmt *SwitchCase::getSubStmt() {
  if (auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getSubStmt();
  else if (auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
  using Stmt::Stmt;

public:
  const Expr *getExprStmt() const;
  Expr *getExprStmt() {
    const ValueStmt *ConstThis = this;
    return const_cast<Expr*>(ConstThis->getExprStmt());
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() >= firstValueStmtConstant &&
           T->getStmtClass() <= lastValueStmtConstant;
  }
};

/// LabelStmt - Represents a label, which has a substatement. For example:
///    foo: return;
class LabelStmt : public ValueStmt {
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  /// Build a label statement.
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    setIdentLoc(IL);
  }

  /// Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}

  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }

  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }

  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};

/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
    : public ValueStmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The statement the attributes apply to.
  Stmt *SubStmt;

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : ValueStmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  // The attributes are stored as trailing objects.
  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};

/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at the end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact a "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //   Always present.
  //
  // * A "Stmt *" for the else statement.
  //   Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //   Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };

  SourceLocation LParenLoc;
  SourceLocation RParenLoc;

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  // Offsets into the trailing Stmt* array; optional leading slots shift the
  // later ones.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr,
         Stmt *Init, VarDecl *Var, Expr *Cond, SourceLocation LParenLoc,
         SourceLocation RParenLoc, Stmt *Then, SourceLocation EL, Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        SourceLocation LPL, SourceLocation RPL, Stmt *Then,
                        SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  /// If this is an 'if constexpr', determine which substatement will be taken.
  /// Otherwise, or if the condition is value-dependent, returns None.
  Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }

  // Iterators over subexpressions. The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which are optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  SourceLocation LParenLoc;
  SourceLocation RParenLoc;

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Offsets into the trailing Stmt* array; optional leading slots shift the
  // later ones.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond,
             SourceLocation LParenLoc, SourceLocation RParenLoc);

  /// Build an empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond, SourceLocation LParenLoc,
                            SourceLocation RParenLoc);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  /// case 0: break;
  /// // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }

  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    // New cases are prepended to the intrusive FirstCase list.
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() {
    SwitchStmtBits.AllEnumCasesCovered = true;
  }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};

/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which are optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  SourceLocation LParenLoc, RParenLoc;

  // Offsets into the trailing Stmt* array; the optional variable slot shifts
  // the later ones.
  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL, SourceLocation LParenLoc,
            SourceLocation RParenLoc);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL,
                           SourceLocation LParenLoc,
                           SourceLocation RParenLoc);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setCond(Cond);
    setBody(Body);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }

  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};

/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc()  { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc()  const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};

/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation LabelLoc;

public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    setGotoLoc(GL);
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  // The 'goto' keyword location is kept in the shared Stmt bit-fields.
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators: a direct goto has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc;
  Stmt *Target;

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setTarget(target);
    setGotoLoc(gotoLoc);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  // The target expression is stored as a Stmt* and cast back on access.
  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&Target, &Target + 1); }

  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};

/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    setContinueLoc(CL);
  }

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }

  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
    setBreakLoc(BL);
  }

  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }

  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;

  /// The return expression.
  Stmt *RetExpr;

  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  // The return value is stored as a Stmt* and cast back to Expr* on access.
  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators: the optional return expression is the only child.
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;

  SourceLocation AsmLoc;

  /// True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  // Operand expressions, laid out outputs first, then inputs
  // (see begin_outputs()/begin_inputs() below).
  Stmt **Exprs = nullptr;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}

public:
  /// Build an empty inline-assembly statement.
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  // Subclasses provide the real locations; the base returns invalid ones.
  SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
  SourceLocation getEndLoc() const LLVM_READONLY { return {}; }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand.  All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint.  Unlike output
  /// constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators.

  using inputs_iterator = ExprIterator;
  using const_inputs_iterator = ConstExprIterator;
  using inputs_range = llvm::iterator_range<inputs_iterator>;
  using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;

  // Inputs are stored immediately after the outputs in Exprs.
  inputs_iterator begin_inputs() {
    return &Exprs[0] + NumOutputs;
  }

  inputs_iterator end_inputs() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const {
    return &Exprs[0] + NumOutputs;
  }

  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators.

  using outputs_iterator = ExprIterator;
  using const_outputs_iterator = ConstExprIterator;
  using outputs_range = llvm::iterator_range<outputs_iterator>;
  using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;

  outputs_iterator begin_outputs() {
    return &Exprs[0];
  }

  outputs_iterator end_outputs() {
    return &Exprs[0] + NumOutputs;
  }

  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const {
    return &Exprs[0];
  }

  const_outputs_iterator end_outputs() const {
    return &Exprs[0] + NumOutputs;
  }

  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};

/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation RParenLoc;
  StringLiteral *AsmStr;

  // FIXME: If we wanted to, we could allocate all of these in one big array.
StringLiteral **Constraints = nullptr;
  StringLiteral **Clobbers = nullptr;
  IdentifierInfo **Names = nullptr;
  unsigned NumLabels = 0;

public:
  GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
             bool isvolatile, unsigned numoutputs, unsigned numinputs,
             IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
             StringLiteral *asmstr, unsigned numclobbers,
             StringLiteral **clobbers, unsigned numlabels,
             SourceLocation rparenloc);

  /// Build an empty inline-assembly statement.
  explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  //===--- Asm String Analysis ---===//

  const StringLiteral *getAsmString() const { return AsmStr; }
  StringLiteral *getAsmString() { return AsmStr; }
  void setAsmString(StringLiteral *E) { AsmStr = E; }

  /// AsmStringPiece - this is part of a decomposed asm string specification
  /// (for use with the AnalyzeAsmString function below).  An asm string is
  /// considered to be a concatenation of these parts.
  class AsmStringPiece {
  public:
    enum Kind {
      String,  // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
      Operand  // Operand reference, with optional modifier %c4.
    };

  private:
    Kind MyKind;
    std::string Str;
    unsigned OperandNo;

    // Source range for operand references.
    CharSourceRange Range;

  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
        : MyKind(Operand), Str(S), OperandNo(OpNo),
          Range(CharSourceRange::getCharRange(Begin, End)) {}

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const { return Str; }

    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present.  This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };

  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces.  If the asm string is erroneous, emit errors and return
  /// true, otherwise return false.  This handles canonicalization and
  /// translation of strings from GCC syntax to LLVM IR syntax, and handles
  /// flattening of named references like %[foo] to Operand AsmStringPiece's.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }

  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();

    return {};
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Names[] and Constraints[] are indexed outputs first, then inputs.
  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();

    return {};
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Labels ---===//

  bool isAsmGoto() const {
    return NumLabels > 0;
  }

  unsigned getNumLabels() const {
    return NumLabels;
  }

  // Label operands ('asm goto') are stored after the outputs and inputs.
  IdentifierInfo *getLabelIdentifier(unsigned i) const {
    return Names[i + NumOutputs + NumInputs];
  }

  AddrLabelExpr *getLabelExpr(unsigned i) const;
  StringRef getLabelName(unsigned i) const;
  using labels_iterator = CastIterator<AddrLabelExpr>;
  using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
  using labels_range = llvm::iterator_range<labels_iterator>;
  using labels_const_range = llvm::iterator_range<const_labels_iterator>;

  labels_iterator begin_labels() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  labels_iterator end_labels() {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_range labels() {
    return labels_range(begin_labels(), end_labels());
  }

  const_labels_iterator begin_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  const_labels_iterator end_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_const_range labels() const {
    return labels_const_range(begin_labels(), end_labels());
  }

private:
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      unsigned NumLabels,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);

public:
  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
  int getNamedOperand(StringRef SymbolicName) const;

  StringRef getClobber(unsigned i) const;

  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }

  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};

/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation LBraceLoc, EndLoc;
  StringRef AsmStr;

  unsigned NumAsmToks = 0;

  Token *AsmToks = nullptr;
  StringRef *Constraints = nullptr;
  StringRef *Clobbers = nullptr;

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints,
            ArrayRef<Expr*> exprs, StringRef asmstr,
            ArrayRef<StringRef> clobbers, SourceLocation endloc);

  /// Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  // The brace location is only valid for the braced form of __asm.
  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//
  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Constraints[] is indexed outputs first, then inputs.
  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Other ---===//

  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }

  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }

  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);

public:
  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};

/// Represents a __except block of a Structured Exception Handling __try.
class SEHExceptStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  Stmt *Children[2];

  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}

public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }

  // The filter is stored as a Stmt* and cast back to Expr* on access.
  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};

class SEHFinallyStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  Stmt *Block;

  SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}

public:
  static SEHFinallyStmt* Create(const ASTContext &C,
                                SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getEndLoc(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block,&Block+1);
  }

  const_child_range children() const {
    return const_child_range(&Block, &Block + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};

class SEHTryStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  bool IsCXXTry;
  SourceLocation TryLoc;
  // The try block and its handler (__except or __finally).
  Stmt *Children[2];

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler);

  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getBeginLoc() const LLVM_READONLY {
    return getTryLoc();
  }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt  *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};

/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;

public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators: __leave has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_ByCopy,
    VCK_VLAType,
  };

  /// Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
  class Capture {
    llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
    SourceLocation Loc;

  public:
    friend class ASTStmtReader;

    /// Create a new capture.
    ///
    /// \param Loc The source location associated with this capture.
    ///
    /// \param Kind The kind of capture (this, ByRef, ...).
    ///
    /// \param Var The variable being captured, or null if capturing this.
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr);

    /// Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const;

    /// Retrieve the source location at which the variable or 'this' was
    /// first used.
    SourceLocation getLocation() const { return Loc; }

    /// Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }

    /// Determine whether this capture handles a variable (by reference).
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }

    /// Determine whether this capture handles a variable by copy.
    bool capturesVariableByCopy() const {
      return getCaptureKind() == VCK_ByCopy;
    }

    /// Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }

    /// Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable.
    VarDecl *getCapturedVar() const;
  };

private:
  /// The number of variable captured, including 'this'.
  unsigned NumCaptures;

  /// The pointer part is the implicit the outlined function and the
  /// int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;

  /// The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl = nullptr;

  /// Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);

  /// Construct an empty captured statement.
  CapturedStmt(EmptyShell Empty, unsigned NumCaptures);

  // Sub-statement storage is allocated immediately after this object;
  // slot [NumCaptures] holds the captured statement itself.
  Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }

  Stmt *const *getStoredStmts() const {
    return reinterpret_cast<Stmt *const *>(this + 1);
  }

  Capture *getStoredCaptures() const;

  void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }

public:
  friend class ASTStmtReader;

  static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
                              CapturedRegionKind Kind,
                              ArrayRef<Capture> Captures,
                              ArrayRef<Expr *> CaptureInits,
                              CapturedDecl *CD, RecordDecl *RD);

  static CapturedStmt *CreateDeserialized(const ASTContext &Context,
                                          unsigned NumCaptures);

  /// Retrieve the statement being captured.
  Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
  const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }

  /// Retrieve the outlined function declaration.
  CapturedDecl *getCapturedDecl();
  const CapturedDecl *getCapturedDecl() const;

  /// Set the outlined function declaration.
  void setCapturedDecl(CapturedDecl *D);

  /// Retrieve the captured region kind.
  CapturedRegionKind getCapturedRegionKind() const;

  /// Set the captured region kind.
  void setCapturedRegionKind(CapturedRegionKind Kind);

  /// Retrieve the record declaration for captured variables.
  const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }

  /// Set the record declaration for captured variables.
  void setCapturedRecordDecl(RecordDecl *D) {
    assert(D && "null RecordDecl");
    TheRecordDecl = D;
  }

  /// True if this variable has been captured.
  bool capturesVariable(const VarDecl *Var) const;

  /// An iterator that walks over the captures.
  using capture_iterator = Capture *;
  using const_capture_iterator = const Capture *;
  using capture_range = llvm::iterator_range<capture_iterator>;
  using capture_const_range = llvm::iterator_range<const_capture_iterator>;

  capture_range captures() {
    return capture_range(capture_begin(), capture_end());
  }
  capture_const_range captures() const {
    return capture_const_range(capture_begin(), capture_end());
  }

  /// Retrieve an iterator pointing to the first capture.
  capture_iterator capture_begin() { return getStoredCaptures(); }
  const_capture_iterator capture_begin() const { return getStoredCaptures(); }

  /// Retrieve an iterator pointing past the end of the sequence of
  /// captures.
  capture_iterator capture_end() const {
    return getStoredCaptures() + NumCaptures;
  }

  /// Retrieve the number of captures, including 'this'.
  unsigned capture_size() const { return NumCaptures; }

  /// Iterator that walks over the capture initialization arguments.
  using capture_init_iterator = Expr **;
  using capture_init_range = llvm::iterator_range<capture_init_iterator>;

  /// Const iterator that walks over the capture initialization
  /// arguments.
  using const_capture_init_iterator = Expr *const *;
  using const_capture_init_range =
      llvm::iterator_range<const_capture_init_iterator>;

  capture_init_range capture_inits() {
    return capture_init_range(capture_init_begin(), capture_init_end());
  }

  const_capture_init_range capture_inits() const {
    return const_capture_init_range(capture_init_begin(), capture_init_end());
  }

  /// Retrieve the first initialization argument.
  capture_init_iterator capture_init_begin() {
    return reinterpret_cast<Expr **>(getStoredStmts());
  }

  const_capture_init_iterator capture_init_begin() const {
    return reinterpret_cast<Expr *const *>(getStoredStmts());
  }

  /// Retrieve the iterator pointing one past the last initialization
  /// argument.
  capture_init_iterator capture_init_end() {
    return capture_init_begin() + NumCaptures;
  }

  const_capture_init_iterator capture_init_end() const {
    return capture_init_begin() + NumCaptures;
  }

  SourceLocation getBeginLoc() const LLVM_READONLY {
    return getCapturedStmt()->getBeginLoc();
  }

  SourceLocation getEndLoc() const LLVM_READONLY {
    return getCapturedStmt()->getEndLoc();
  }

  SourceRange getSourceRange() const LLVM_READONLY {
    return getCapturedStmt()->getSourceRange();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CapturedStmtClass;
  }

  child_range children();

  const_child_range children() const;
};

} // namespace clang

#endif // LLVM_CLANG_AST_STMT_H
omp-matmat-one-parallel.c
/***************************************************************************** Example : omp-matmat-one-parallel.c Objective : Matrix - Matrix Multiplication using OpenMP one PARALLEL for directive and Private Clause Input : Size of Matrices(i.e Size of Matrix A and Matrix B) ie in terms of CLASS where CLASS A :1024; CLASS B: 2048 and CLASS C: 4096 Number of Threads Output : Number of Threads Total Memory Utilized for the Matrix - Matrix Computation Total Time Taken for Matrix - Matrix Computaion Created : Aug 2011 Author : RarchK *********************************************************************************/ #include <stdio.h> #include <sys/time.h> #include <omp.h> #include<string.h> #include <stdlib.h> /* Function declaration */ double Matrix_Multiplication_One(double **Matrix_A,double **Matrix_B,double **Result,int N_size,int Total_threads); /* Main Program */ main(int argc , char * argv[]) { int CLASS_SIZE,N_size, i,j,k,Total_threads,THREADS; double Total_overhead = 0.0; double **Matrix_A, **Matrix_B, **Result; double memoryused=0.0; int iteration; FILE *fp; char *CLASS; printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Email : RarchK"); printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Objective : Dense Matrix Computations (Floating Point Operations)\n "); printf("\n\t\t Matrix into Matrix Multiplication using "); printf("\n\t\t OpenMP one PARALLEL for directive and Private Clause;"); printf("\n\t\t..........................................................................\n"); /* Checking for command line arguments */ if( argc != 3 ){ printf("\t\t Very Few Arguments\n "); printf("\t\t Syntax : exec <Class-Size> <Threads>\n"); printf("\t\t Where : Class-Size = A or B or C\n"); exit(-1); } else { CLASS = argv[1]; THREADS = atoi(argv[2]); } if( strcmp(CLASS, "A" )==0){ CLASS_SIZE = 1024; } if( strcmp(CLASS, "B" )==0){ CLASS_SIZE = 2048; } if( 
strcmp(CLASS, "C" )==0){ CLASS_SIZE = 4096; } N_size = CLASS_SIZE; Total_threads = THREADS; printf("\n\t\t Matrix Size : %d",N_size); printf("\n\t\t Threads : %d",Total_threads); printf("\n"); /* Matrix_A Elements */ Matrix_A = (double **) malloc(sizeof(double *) * N_size); for (i = 0; i < N_size; i++) { Matrix_A[i] = (double *) malloc(sizeof(double) * N_size); for (j = 0; j < N_size; j++) { // srand48((unsigned int)N_size); // Matrix_A[i][j] = (double)(rand()%10); Matrix_A[i][j] = i+j; } } /* Matrix_B Elements */ Matrix_B = (double **) malloc(sizeof(double *) * N_size); for (i = 0; i < N_size; i++) { Matrix_B[i] = (double *) malloc(sizeof(double) * N_size); for (j = 0; j < N_size; j++) { // srand48((unsigned int)N_size); // Matrix_B[i][j] = (double)(rand()%10); Matrix_B[i][j] = i+j; } } /* Dynamic Memory Allocation */ Result = (double **) malloc(sizeof(double *) * N_size); for (i = 0; i < N_size; i++) Result[i] = (double *) malloc(sizeof(double) * N_size); memoryused = (3*(N_size*N_size))*sizeof(double); /* Function Calling */ Total_overhead = Matrix_Multiplication_One(Matrix_A,Matrix_B,Result,N_size,Total_threads); printf("\n\t\t Memory Utilized : %lf MB \n",(memoryused/(1024*1024))); printf("\n\t\t Time in Seconds (T) : %lf Seconds \n",Total_overhead); printf("\n\t\t ( T represents the Time taken for the computation )"); printf("\n\t\t..........................................................................\n"); /* Free Memory */ free(Matrix_A); free(Matrix_B); free(Result); }/* Main function end */ /* Functions implementation */ double Matrix_Multiplication_One(double **Matrix_A,double **Matrix_B,double **Result,int N_size,int Total_threads) { int i,j,k; struct timeval TimeValue_Start; struct timezone TimeZone_Start; struct timeval TimeValue_Final; struct timezone TimeZone_Final; long time_start, time_end; double time_overhead; gettimeofday(&TimeValue_Start, &TimeZone_Start); /* Set the no. 
of threads */ omp_set_num_threads(Total_threads); /* OpenMP Parallel For Directive : Fork a team of threads giving them their own copies of variables */ #pragma omp parallel for private(j,k) for (i = 0; i < N_size; i = i + 1) for (j = 0; j < N_size; j = j + 1){ Result[i][j]=0.0; for (k = 0; k < N_size; k = k + 1) Result[i][j] = Result[i][j] + Matrix_A[i][k] * Matrix_B[k][j]; }/* All threads join master thread and disband */ gettimeofday(&TimeValue_Final, &TimeZone_Final); time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec; time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec; time_overhead = (time_end - time_start)/1000000.0; printf("\n\t\t Matrix into Matrix Multiplication using one Parallel for pragma......Done \n"); return time_overhead; }
truedepfirstdimension-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * Outer loop has loop carried true dependence */ #include <stdlib.h> #include <stdio.h> int main(int argc, char* argv[]) { int i,j; int n=1000, m=1000; double b[1000][1000]; for (i=0; i<n; i++) for (j=0; j<m; j++) b[i][j] = 0.5; #pragma omp parallel for for (i=1;i<n;i++) for (j=1;j<m;j++) b[i][j]=b[i-1][j-1]; printf("b[500][500]=%f\n", b[500][500]); return 0; }
omp_hello.c
/****************************************************************************** * * FILE: omp_hello.c * * DESCRIPTION: * * OpenMP Example - Hello World - C/C++ Version * * In this simple example, the master thread forks a parallel region. * * All threads in the team obtain their unique thread number and print it. * * The master thread only prints the total number of threads. Two OpenMP * * library routines are used to obtain the number of threads and each * * thread's number. * * AUTHOR: Blaise Barney 5/99 * * LAST REVISED: 04/06/05 * ******************************************************************************/ #include <omp.h> #include <stdio.h> #include <stdlib.h> int main(int argc, char *argv[]) { int nthreads, tid; /* Fork a team of threads giving them their own copies of variables */ #pragma omp parallel private(nthreads, tid) { /* Obtain thread number */ tid = omp_get_thread_num(); printf("Hello World from thread = %d\n", tid); if (tid == 0) { nthreads = 5; printf("Number of threads = %d\n", nthreads); } /* Only master thread does this */ if (tid == 1) { nthreads = omp_get_num_threads(); printf("Number of threads = %d\n", nthreads); } } /* All threads join master thread and disband */ }
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 16; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
GB_unop__identity_int32_int64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// (Review note: comments only added below; no code changed.)

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_int32_int64
// op(A') function:  GB_unop_tran__identity_int32_int64

// C type:   int32_t
// A type:   int64_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int32_t z = (int32_t) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (0 here: A is int64_t and C is int32_t, so every entry is typecast and
// the plain-memcpy fast path below is compiled out for this kernel)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply the identity op with int64->int32 typecast to the anz entries of Ax,
// writing into Cx.  Ab (the bitmap, or NULL) selects which entries exist.
// Returns GrB_NO_VALUE when compiled out by GB_DISABLE, else GrB_SUCCESS.
GrB_Info GB_unop_apply__identity_int32_int64
(
    int32_t *Cx,               // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // sparse/full case: every one of the anz entries is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            int32_t z = (int32_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            int32_t z = (int32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body is generated by GB_unop_transpose.c using the macros above;
// Workspaces/A_slice partition the work across nthreads.
GrB_Info GB_unop_tran__identity_int32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
fig4.87-nested-parallel-mod.c
/* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. Copyright 2009 Sun Microsystems, Inc. All rights reserved. The contents of this file are subject to the terms of the BSD License("BSD")(the "License"). You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt The BSD License Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistribution of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistribution in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Sun Microsystems, Inc. or the names of contributors may be used to endorse or promote products derived from this software without specific prior written permission. This software is provided "AS IS," without a warranty of any kind. ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. You acknowledge that this software is not designed, licensed or intended for use in the design, construction, operation or maintenance of any nuclear facility. 
*/ #include <stdio.h> #include <stdlib.h> #ifdef _OPENMP #include <omp.h> #define TRUE 1 #define FALSE 0 #else #define omp_get_thread_num() 0 #define omp_get_num_threads() 1 #define omp_get_nested() 0 #endif int main() { int TID = -1; #ifdef _OPENMP (void) omp_set_dynamic(FALSE); if (omp_get_dynamic()) {printf("Warning: dynamic adjustment of threads has been set\n");} (void) omp_set_num_threads(3); (void) omp_set_nested(TRUE); if (! omp_get_nested()) {printf("Warning: nested parallelism not set\n");} #endif printf("Nested parallelism is %s\n", omp_get_nested() ? "supported" : "not supported"); /* ------------------------------------------------------------------------ Inside the parallel region we can distinguish between the threads ------------------------------------------------------------------------ */ #pragma omp parallel private(TID) { TID = omp_get_thread_num(); printf("Thread %d executes the outer parallel region\n",TID); #pragma omp parallel num_threads(2) firstprivate(TID) { printf("TID %d: Thread %d executes inner parallel region\n", TID,omp_get_thread_num()); } /*-- End of inner parallel region --*/ } /*-- End of outer parallel region --*/ return(0); }
dart.h
#ifndef DART__DART_H_ #define DART__DART_H_ /** * \file dart.h * * \defgroup DartInterface DART - The DASH Runtime Interface * * Common C interface of the underlying communication back-end. * * * DASH/DART Terminology * ===================== * * DASH is a realization of the PGAS (partitioned global address space) * programming model. Below is an attempt to define some of the * terminology used in the project. DART is the name of the DASH * runtime. * * DASH Units, Teams, and Groups * ----------------------------- * * The individual participants in a DASH program are called units. One * can think of a DASH unit like an MPI process or UPC thread. The * generic term 'unit' is used to have the conceptual freedom to later * map a dash unit to a OS process, thread, or any other concept that * might fit (for example, in the context of GPUs and accelerators). * * Teams are ordered sets of units, identified by an integer ID. Each * unit has a non-negative, zero-based integer ID in a given team, which * always remains unchanged throughout the lifetime of the team. In * each application there exists a default team that contains all the * units that comprise the program denoted by DART_TEAM_ALL. * * Groups are also sets of units. The difference between groups and * teams is that groups have local meaning only, while teams are * coherent across several units. In effect, group related operations * are local, while operations to manipulate teams are collective and * will require communication and can thus be costly. * * Local/Global/Private/Shared * --------------------------- * * ### 1) Local and Global: ##### * The terms local and global are adjectives to describe the address * spaces in a DASH program. The local address space of a dash unit is * managed by the regular OS mechanisms (malloc, free), and data items * in the local address space are addressed by regular pointers. The * global address space in a DASH program is a virtual abstraction. 
Each
 * DASH unit contributes a part of its memory to make up its partition
 * of the global address space. Data items in the global memory are
 * addressed by global pointers provided by the DART runtime.
 *
 * ### 2) Private and Shared: ###
 * The adjectives private and shared describe the accessibility of data
 * items in DASH. A shared datum is one that can be accessed by more
 * than one unit (by means of the DART runtime). A private datum is one
 * that is not shared.
 *
 * ### 3) Partitions, Affinity, Ownership ###
 * ... to be written...
 * idea: we might use the term affinity to express hierarchical locality
 *
 * ### 4) Team-Alignment and Symmetry: ###
 * Team-aligned and symmetric are terms describing memory allocations.
 * A memory allocation is symmetric (with respect to a team) if the
 * same amount of memory (in bytes) is allocated by each member of the
 * team. The memory allocation is said to be team-aligned (with respect
 * to a specific team) if the same segment-id can be used in a global
 * pointer to refer to any member's portion of the allocated memory.
 * (See the section on global pointers below for segment IDs.)
 *
 * A team-aligned and symmetric allocation has the nice property that
 * any member of the team is able to locally compute a global pointer to
 * any location in the allocated memory.
 *
 *
 * A note on thread safety:
 * ------------------------
 *
 * In this release, most of DART's functionality cannot be called from within
 * multiple threads in parallel. This is especially true for
 * \ref DartGroupTeam "group and team management" and \ref DartGlobMem "global
 * memory management" functionality as well as \ref DartCommunication
 * "communication operations".
 * All exceptions from this rule have been marked accordingly in the
 * documentation. Improvements to thread-safety of DART are scheduled for the
 * next release.
 *
 * Note that this also affects global operations in DASH as they rely on DART
 * functionality.
However, all operations on local data can be considered * thread-safe, e.g., `Container.local` or `Container.lbegin`. * The local access operators adhere to the C++ STL thread-safety * rules (see http://en.cppreference.com/w/cpp/container for details). * Thus, the following code is valid: * * \code{.cc} dash::Array<int> arr(...); #pragma omp parallel for // OK to parallelize since we're working on .local for( auto i=0; i<arr.local.size(); i++ ) [ arr.local[i]=foo(i); } * \endcode * * * Logging * ------- * * DART can be configured to produce log output with different log levels, a * feature that is mainly meant for debugging purposes. To enable general * logging output, the parameter \c -DENABLE_DART_LOGGING=ON should be * passed to CMake when building DART/DASH. Alternatively, the pre-compiler * macro \c DART_ENABLE_LOGGING can be defined manually. Please note that the * additional log output may cause notable performance overhead and should * not be enabled for production runs. * * The verbosity of the log output can be controlled at runtime through * the environment variable DART_LOG_LEVEL, whose value (if set) controls * the maximum log level. Possible values are: * - \c DART_LOGLEVEL_ERROR: Emit only messages on errors that are fatal * (similar to having logging disabled). * - \c DART_LOGLEVEL_WARN: Emit error messages and non-fatal warnings. * - \c DART_LOGLEVEL_INFO: In addition to errors and warnings, emit * additional information on the execution * of the DART library. * - \c DART_LOGLEVEL_DEBUG: Issue detailed debugging output on (mostly) * all DART methods executed. * - \c DART_LOGLEVEL_TRACE: In addition to the above, also output * information on the internal state of DART. 
* */ #ifdef __cplusplus extern "C" { #endif /* --- DART version and build date --- */ /** \cond DART_HIDDEN_SYMBOLS */ #define DART_VERSION_STR "3.2.0" #define DART_BUILD_STR (__DATE__ " " __TIME__) /** \endcond */ /* --- DART types and return values */ #include "dart_types.h" /* --- DART build- and environment configuration */ #include "dart_config.h" /* --- DART init/finalization */ #include "dart_initialization.h" /* --- DART group and team management --- */ #include "dart_team_group.h" /* --- DART global pointer and memory management --- */ #include "dart_globmem.h" /* --- DART collective communication --- --- DART onesided communication --- */ #include "dart_communication.h" /* --- DART synchronization --- */ #include "dart_synchronization.h" #ifdef __cplusplus } // extern "C" #endif #endif /* DART_DART_H_ */
dotProduct_critical.c
/* OpenMP example program which computes the dot product of two arrays a and b (that is sum(a[i]*b[i]) ) using explicit synchronization with a critical region. Compile with gcc -O3 -fopenmp omp_critical.c -o omp_critical */ // Online source: http://users.abo.fi/mats/PP2012/examples/OpenMP/omp_critical.c // permission obtained #include <omp.h> #include <stdio.h> #include <stdlib.h> #ifdef _CIVL #define N 10 #else #define N 100 #endif int main (int argc, char *argv[]) { double a[N], b[N]; double localsum, sum = 0.0; int i, tid, nthreads; #pragma omp parallel shared(a,b,sum) private(i, localsum, tid) { /* Get thread number */ tid = omp_get_thread_num(); /* Only master thread does this */ #pragma omp master { nthreads = omp_get_num_threads(); printf("Number of threads = %d\n", nthreads); } /* Initialization */ #pragma omp for for (i=0; i < N; i++) a[i] = b[i] = (double)i; localsum = 0.0; /* Compute the local sums of all products */ #pragma omp for for (i=0; i < N; i++) localsum = localsum + (a[i] * b[i]); #pragma omp critical sum = sum+localsum; } /* End of parallel region */ printf(" Sum = %2.1f\n",sum); exit(0); }
health.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /**********************************************************************************************/ /* OLDEN parallel C for dynamic structures: compiler, runtime system * and benchmarks * * Copyright (C) 1994-1996 by Anne Rogers (amr@cs.princeton.edu) and * Martin Carlisle (mcc@cs.princeton.edu) * ALL RIGHTS RESERVED. * * OLDEN is distributed under the following conditions: * * You may make copies of OLDEN for your own use and modify those copies. * * All copies of OLDEN must retain our names and copyright notice. * * You may not sell OLDEN or distribute OLDEN in conjunction with a * commercial product or service without the expressed written consent of * Anne Rogers and Martin Carlisle. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE. 
* */ /******************************************************************* * Health.c : Model of the Colombian Health Care System * *******************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include "app-desc.h" #include "bots.h" #include "health.h" /* global variables */ int sim_level; int sim_cities; int sim_population_ratio; int sim_time; int sim_assess_time; int sim_convalescence_time; long sim_seed; float sim_get_sick_p; float sim_convalescence_p; float sim_realloc_p; int sim_pid = 0; int res_population; int res_hospitals; int res_personnel; int res_checkin; int res_village; int res_waiting; int res_assess; int res_inside; float res_avg_stay; /********************************************************** * Handles math routines for health.c * **********************************************************/ float my_rand(long *seed) { long k; float answer; long idum = *seed; idum ^= MASK; k = idum / IQ; idum = IA * (idum - k * IQ) - IR * k; idum ^= MASK; if (idum < 0) idum += IM; answer = (float) AM * idum; *seed = (long) (answer * IM); return answer; } /******************************************************************** * Handles lists. 
* ********************************************************************/
/* Append `patient` to the tail of the doubly-linked list `*list`.
   O(n) per call: walks to the end of the list each time. */
void addList(struct Patient **list, struct Patient *patient)
{
   if (*list == NULL)
   {
      /* Empty list: the patient becomes the head. */
      *list = patient;
      patient->back = NULL;
      patient->forward = NULL;
   }
   else
   {
      /* Walk to the last node and link the patient behind it. */
      struct Patient *aux = *list;
      while (aux->forward != NULL) aux = aux->forward;
      aux->forward = patient;
      patient->back = aux;
      patient->forward = NULL;
   }
}

/* Unlink `patient` from `*list` in O(1) using its own back/forward links.
   The #if 0 branch is the original O(n) search-then-remove variant, kept
   disabled for reference. */
void removeList(struct Patient **list, struct Patient *patient)
{
#if 0
   struct Patient *aux = *list;

   if (patient == NULL) return;
   while((aux != NULL) && (aux != patient)) aux = aux->forward;

   // Patient not found
   if (aux == NULL) return;

   // Removing patient
   if (aux->back != NULL) aux->back->forward = aux->forward;
   else *list = aux->forward;
   if (aux->forward != NULL) aux->forward->back = aux->back;
#else
   if (patient->back != NULL) patient->back->forward = patient->forward;
   else *list = patient->forward;
   if (patient->forward != NULL) patient->forward->back = patient->back;
#endif
}

/**********************************************************************/
/* Recursively build the village hierarchy rooted at `*capital`.
   level 0 yields NULL; otherwise personnel = 2^level, population =
   personnel * sim_population_ratio, and the village gets sim_cities
   child villages at level-1.
   NOTE(review): if sim_cities were 0, `current` would be read
   uninitialized for (*capital)->forward — presumably sim_cities >= 1
   always; confirm against the input parameters. */
void allocate_village( struct Village **capital, struct Village *back,
   struct Village *next, int level, int vid)
{
   int i, population, personnel;
   struct Village *current, *inext;
   struct Patient *patient;

   if (level == 0) *capital = NULL;
   else
   {
      personnel = (int) pow(2, level);
      population = personnel * sim_population_ratio;
      /* Allocate Village */
      *capital = (struct Village *) malloc(sizeof(struct Village));
      /* Initialize Village */
      (*capital)->back = back;
      (*capital)->next = next;
      (*capital)->level = level;
      (*capital)->id = vid;
      (*capital)->seed = vid * (IQ + sim_seed);
      (*capital)->population = NULL;
      for(i=0;i<population;i++)
      {
         patient = (struct Patient *)malloc(sizeof(struct Patient));
         patient->id = sim_pid++;
         patient->seed = (*capital)->seed;
         // changes seed for capital:
         my_rand(&((*capital)->seed));
         patient->hosps_visited = 0;
         patient->time = 0;
         patient->time_left = 0;
         patient->home_village = *capital;
         addList(&((*capital)->population), patient);
      }
      /* Initialize Hospital */
      (*capital)->hosp.personnel = personnel;
      (*capital)->hosp.free_personnel = personnel;
      (*capital)->hosp.assess = NULL;
      (*capital)->hosp.waiting = NULL;
      (*capital)->hosp.inside = NULL;
      (*capital)->hosp.realloc = NULL;
      omp_init_lock(&(*capital)->hosp.realloc_lock);
      // Create Cities (lower level)
      inext = NULL;
      for (i = sim_cities; i>0; i--)
      {
         allocate_village(&current, *capital, inext, level-1, (vid*sim_cities)+i);
         inext = current;
      }
      (*capital)->forward = current;
   }
}

/**********************************************************************/
/* Recursively accumulate statistics over the subtree rooted at `village`:
   hospital/personnel counts plus patient totals broken down by where the
   patient currently is (village, waiting, assess, inside). */
struct Results get_results(struct Village *village)
{
   struct Village *vlist;
   struct Patient *p;
   struct Results t_res, p_res;

   t_res.hosps_number = 0.0;
   t_res.hosps_personnel = 0.0;
   t_res.total_patients = 0.0;
   t_res.total_in_village = 0.0;
   t_res.total_waiting = 0.0;
   t_res.total_assess = 0.0;
   t_res.total_inside = 0.0;
   t_res.total_hosps_v = 0.0;
   t_res.total_time = 0.0;

   if (village == NULL) return t_res;

   /* Traverse village hierarchy (lower level first)*/
   vlist = village->forward;
   while(vlist)
   {
      p_res = get_results(vlist);
      t_res.hosps_number += p_res.hosps_number;
      t_res.hosps_personnel += p_res.hosps_personnel;
      t_res.total_patients += p_res.total_patients;
      t_res.total_in_village += p_res.total_in_village;
      t_res.total_waiting += p_res.total_waiting;
      t_res.total_assess += p_res.total_assess;
      t_res.total_inside += p_res.total_inside;
      t_res.total_hosps_v += p_res.total_hosps_v;
      t_res.total_time += p_res.total_time;
      vlist = vlist->next;
   }
   /* Count this village's own hospital and staff. */
   t_res.hosps_number += 1.0;
   t_res.hosps_personnel += village->hosp.personnel;

   // Patients in the village
   p = village->population;
   while (p != NULL)
   {
      t_res.total_patients += 1.0;
      t_res.total_in_village += 1.0;
      t_res.total_hosps_v += (float)(p->hosps_visited);
      t_res.total_time += (float)(p->time);
      p = p->forward;
   }
   // Patients in hospital: waiting
   p = village->hosp.waiting;
   while (p != NULL)
   {
      t_res.total_patients += 1.0;
      t_res.total_waiting += 1.0;
      t_res.total_hosps_v += (float)(p->hosps_visited);
      t_res.total_time += (float)(p->time);
      p = p->forward;
   }
   // Patients in hospital: assess
   p = village->hosp.assess;
   while (p != NULL)
   {
      t_res.total_patients += 1.0;
      t_res.total_assess += 1.0;
      t_res.total_hosps_v += (float)(p->hosps_visited);
      t_res.total_time += (float)(p->time);
      p = p->forward;
   }
   // Patients in hospital: inside
   p = village->hosp.inside;
   while (p != NULL)
   {
      t_res.total_patients += 1.0;
      t_res.total_inside += 1.0;
      t_res.total_hosps_v += (float)(p->hosps_visited);
      t_res.total_time += (float)(p->time);
      p = p->forward;
   }

   return t_res;
}

/**********************************************************************/
/**********************************************************************/
/**********************************************************************/
/* Tick all convalescing patients: when a patient's stay ends, release
   the staff member and send the patient back to the village population.
   The next node is saved before unlinking, so removal is safe mid-walk. */
void check_patients_inside(struct Village *village)
{
   struct Patient *list = village->hosp.inside;
   struct Patient *p;

   while (list != NULL)
   {
      p = list;
      list = list->forward;
      p->time_left--;
      if (p->time_left == 0)
      {
         village->hosp.free_personnel++;
         removeList(&(village->hosp.inside), p);
         addList(&(village->population), p);
      }
   }
}

/**********************************************************************/
/* Tick all patients under assessment; when assessment finishes, draw
   random numbers to decide whether the patient convalesces here, or
   (function continues past this excerpt). */
void check_patients_assess_par(struct Village *village)
{
   struct Patient *list = village->hosp.assess;
   float rand;
   struct Patient *p;

   while (list != NULL)
   {
      p = list;
      list = list->forward;
      p->time_left--;

      if (p->time_left == 0)
      {
         rand = my_rand(&(p->seed));
         /* sim_covalescense_p % */
         if (rand < sim_convalescence_p)
         {
            rand = my_rand(&(p->seed));
            /* !sim_realloc_p % or root hospital */
            if (rand > sim_realloc_p || village->level == sim_level)
            {
               /* Admit the patient here for convalescence. */
               removeList(&(village->hosp.assess), p);
               addList(&(village->hosp.inside), p);
               p->time_left = sim_convalescence_time;
               p->time += p->time_left;
            }
            else /* move to upper level hospital !!!
*/ { village->hosp.free_personnel++; removeList(&(village->hosp.assess), p); omp_set_lock(&(village->hosp.realloc_lock)); addList(&(village->back->hosp.realloc), p); omp_unset_lock(&(village->hosp.realloc_lock)); } } else /* move to village */ { village->hosp.free_personnel++; removeList(&(village->hosp.assess), p); addList(&(village->population), p); } } } } /**********************************************************************/ void check_patients_waiting(struct Village *village) { struct Patient *list = village->hosp.waiting; struct Patient *p; while (list != NULL) { p = list; list = list->forward; if (village->hosp.free_personnel > 0) { village->hosp.free_personnel--; p->time_left = sim_assess_time; p->time += p->time_left; removeList(&(village->hosp.waiting), p); addList(&(village->hosp.assess), p); } else { p->time++; } } } /**********************************************************************/ void check_patients_realloc(struct Village *village) { struct Patient *p, *s; while (village->hosp.realloc != NULL) { p = s = village->hosp.realloc; while (p != NULL) { if (p->id < s->id) s = p; p = p->forward; } removeList(&(village->hosp.realloc), s); put_in_hosp(&(village->hosp), s); } } /**********************************************************************/ void check_patients_population(struct Village *village) { struct Patient *list = village->population; struct Patient *p; float rand; while (list != NULL) { p = list; list = list->forward; /* randomize in patient */ rand = my_rand(&(p->seed)); if (rand < sim_get_sick_p) { removeList(&(village->population), p); put_in_hosp(&(village->hosp), p); } } } /**********************************************************************/ void put_in_hosp(struct Hosp *hosp, struct Patient *patient) { (patient->hosps_visited)++; if (hosp->free_personnel > 0) { hosp->free_personnel--; addList(&(hosp->assess), patient); patient->time_left = sim_assess_time; patient->time += patient->time_left; } else { addList(&(hosp->waiting), 
patient); } } /**********************************************************************/ #if defined (IF_CUTOFF) void sim_village_par(struct Village *village) { struct Village *vlist; // lowest level returns nothing // only for sim_village first call with village = NULL // recursive call cannot occurs if (village == NULL) return; /* Traverse village hierarchy (lower level first)*/ vlist = village->forward; while(vlist) { #pragma omp task untied if((sim_level - village->level) < bots_cutoff_value) sim_village_par(vlist); vlist = vlist->next; } /* Uses lists v->hosp->inside, and v->return */ check_patients_inside(village); /* Uses lists v->hosp->assess, v->hosp->inside, v->population and (v->back->hosp->realloc) !!! */ check_patients_assess_par(village); /* Uses lists v->hosp->waiting, and v->hosp->assess */ check_patients_waiting(village); #pragma omp taskwait /* Uses lists v->hosp->realloc, v->hosp->asses and v->hosp->waiting */ check_patients_realloc(village); /* Uses list v->population, v->hosp->asses and v->h->waiting */ check_patients_population(village); } #elif defined (MANUAL_CUTOFF) void sim_village_par(struct Village *village) { struct Village *vlist; // lowest level returns nothing // only for sim_village first call with village = NULL // recursive call cannot occurs if (village == NULL) return; /* Traverse village hierarchy (lower level first)*/ vlist = village->forward; if ((sim_level-village->level) < bots_cutoff_value) { while(vlist) { #pragma omp task untied sim_village_par(vlist); vlist = vlist->next; } } else { while(vlist) { sim_village_par(vlist); vlist = vlist->next; } } /* Uses lists v->hosp->inside, and v->return */ check_patients_inside(village); /* Uses lists v->hosp->assess, v->hosp->inside, v->population and (v->back->hosp->realloc) !!! 
*/ check_patients_assess_par(village); /* Uses lists v->hosp->waiting, and v->hosp->assess */ check_patients_waiting(village); if ((sim_level-village->level) < bots_cutoff_value) { #pragma omp taskwait } /* Uses lists v->hosp->realloc, v->hosp->asses and v->hosp->waiting */ check_patients_realloc(village); /* Uses list v->population, v->hosp->asses and v->h->waiting */ check_patients_population(village); } #else void sim_village_par(struct Village *village) { struct Village *vlist; // lowest level returns nothing // only for sim_village first call with village = NULL // recursive call cannot occurs if (village == NULL) return; /* Traverse village hierarchy (lower level first)*/ vlist = village->forward; while(vlist) { #pragma omp task untied sim_village_par(vlist); vlist = vlist->next; } /* Uses lists v->hosp->inside, and v->return */ check_patients_inside(village); /* Uses lists v->hosp->assess, v->hosp->inside, v->population and (v->back->hosp->realloc) !!! */ check_patients_assess_par(village); /* Uses lists v->hosp->waiting, and v->hosp->assess */ check_patients_waiting(village); #pragma omp taskwait /* Uses lists v->hosp->realloc, v->hosp->asses and v->hosp->waiting */ check_patients_realloc(village); /* Uses list v->population, v->hosp->asses and v->h->waiting */ check_patients_population(village); } #endif /**********************************************************************/ void my_print(struct Village *village) { struct Village *vlist; struct Patient *plist; struct Patient *p; if (village == NULL) return; /* Traverse village hierarchy (lower level first)*/ vlist = village->forward; while(vlist) { my_print(vlist); vlist = vlist->next; } plist = village->population; while (plist != NULL) { p = plist; plist = plist->forward; bots_message("[pid:%d]",p->id); } bots_message("[vid:%d]\n",village->id); } /**********************************************************************/ void read_input_data(char *filename) { FILE *fin; int res; if ((fin = fopen(filename, 
"r")) == NULL) { bots_message("Could not open sequence file (%s)\n", filename); exit (-1); } res = fscanf(fin,"%d %d %d %d %d %d %ld %f %f %f %d %d %d %d %d %d %d %d %f", &sim_level, &sim_cities, &sim_population_ratio, &sim_time, &sim_assess_time, &sim_convalescence_time, &sim_seed, &sim_get_sick_p, &sim_convalescence_p, &sim_realloc_p, &res_population, &res_hospitals, &res_personnel, &res_checkin, &res_village, &res_waiting, &res_assess, &res_inside, &res_avg_stay ); if ( res == EOF ) { bots_message("Bogus input file (%s)\n", filename); exit(-1); } fclose(fin); // Printing input data bots_message("\n"); bots_message("Number of levels = %d\n", (int) sim_level); bots_message("Cities per level = %d\n", (int) sim_cities); bots_message("Population ratio = %d\n", (int) sim_population_ratio); bots_message("Simulation time = %d\n", (int) sim_time); bots_message("Assess time = %d\n", (int) sim_assess_time); bots_message("Convalescence time = %d\n", (int) sim_convalescence_time); bots_message("Initial seed = %d\n", (int) sim_seed); bots_message("Get sick prob. = %f\n", (float) sim_get_sick_p); bots_message("Convalescence prob. = %f\n", (float) sim_convalescence_p); bots_message("Realloc prob. 
= %f\n", (float) sim_realloc_p); } int check_village(struct Village *top) { struct Results result = get_results(top); int answer = BOTS_RESULT_SUCCESSFUL; if (res_population != result.total_patients) answer = BOTS_RESULT_UNSUCCESSFUL; if (res_hospitals != result.hosps_number) answer = BOTS_RESULT_UNSUCCESSFUL; if (res_personnel != result.hosps_personnel) answer = BOTS_RESULT_UNSUCCESSFUL; if (res_checkin != result.total_hosps_v) answer = BOTS_RESULT_UNSUCCESSFUL; if (res_village != result.total_in_village) answer = BOTS_RESULT_UNSUCCESSFUL; if (res_waiting != result.total_waiting) answer = BOTS_RESULT_UNSUCCESSFUL; if (res_assess != result.total_assess) answer = BOTS_RESULT_UNSUCCESSFUL; if (res_inside != result.total_inside) answer = BOTS_RESULT_UNSUCCESSFUL; bots_message("\n"); bots_message("Sim. Variables = expect / result\n"); bots_message("Total population = %6d / %6d people\n", (int) res_population, (int) result.total_patients); bots_message("Hospitals = %6d / %6d people\n", (int) res_hospitals, (int) result.hosps_number); bots_message("Personnel = %6d / %6d people\n", (int) res_personnel, (int) result.hosps_personnel); bots_message("Check-in's = %6d / %6d people\n", (int) res_checkin, (int) result.total_hosps_v); bots_message("In Villages = %6d / %6d people\n", (int) res_village, (int) result.total_in_village); bots_message("In Waiting List = %6d / %6d people\n", (int) res_waiting, (int) result.total_waiting); bots_message("In Assess = %6d / %6d people\n", (int) res_assess, (int) result.total_assess); bots_message("Inside Hospital = %6d / %6d people\n", (int) res_inside, (int) result.total_inside); bots_message("Average Stay = %6f / %6f u/time\n", (float) res_avg_stay,(float) result.total_time/result.total_patients); my_print(top); return answer; } /**********************************************************************/ void sim_village_main_par(struct Village *top) { long i; #pragma omp parallel #pragma omp single #pragma omp task untied for (i = 0; i < 
sim_time; i++) sim_village_par(top); }
test_taskargs.c
//===-- test_taskargs.c - Test task creation and argument passing *- C -*-===// // // Part of the LOMP project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include "omp.h" int main(void) { int failed = 0; #pragma omp parallel shared(failed) { #pragma omp master { int tpvar = 42; int tpvar2 = 84; #pragma omp task firstprivate(tpvar, tpvar2) { int me = omp_get_thread_num(); fprintf(stderr, "In task in thread %d\n", me); fflush(stderr); fprintf(stderr, "%d: &tpvar = %p, &tpvar2 = %p\n", me, &tpvar, &tpvar2); fflush(stderr); fprintf(stderr, "%d: tpvar = %d (should be 42), tpvar2 = %d " "(should be 84)\n", me, tpvar, tpvar2); failed = (tpvar != 42) || (tpvar2 != 84); fflush(stderr); } } } printf("***%s***\n", failed ? "FAILED" : "PASSED"); return failed ? EXIT_FAILURE : EXIT_SUCCESS; }
omp_for_collapse.c
<ompts:test>
<ompts:testdescription>Test with omp for collapse clause. Bind with two loops. Without the collapse clause, the first loop will not be ordered</ompts:testdescription>
<ompts:ompversion>3.0</ompts:ompversion>
<ompts:directive>omp for collapse</ompts:directive>
<ompts:dependences>omp critical,omp for schedule</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"

/* Utility function to check that i is increasing monotonically
   with each call.
   NOTE(review): last_i is shared static state; in the crosscheck
   variant (no ordered construct) concurrent calls can race on it --
   presumably intentional, so the unsynchronized variant fails. */
static int check_i_islarger (int i)
{
    static int last_i;
    int islarger;
    /* i==1 marks the start of a fresh traversal: reset the history. */
    if (i==1)
      last_i=0;
    /* accept only a non-decreasing step of at most 1 */
    islarger = ((i >= last_i)&&(i - last_i<=1));
    last_i = i;
    return (islarger);
}

int <ompts:testcode:functionname>omp_for_collapse</ompts:testcode:functionname> (FILE * logFile)
{
    <ompts:orphan:vars>
    int is_larger = 1;
    </ompts:orphan:vars>

#pragma omp parallel
    {
        <ompts:orphan>
        int i,j;
        int my_islarger = 1;
        /* collapse(2) + ordered: iterations of the fused 99x99 space must
           be observed in sequential order inside the ordered region */
#pragma omp for private(i,j) schedule(static,1) <ompts:check>collapse(2)</ompts:check> ordered
        for (i = 1; i < 100; i++){
          <ompts:crosscheck>my_islarger = check_i_islarger(i)&& my_islarger;</ompts:crosscheck>
          for (j =1; j <100; j++)
            {
              <ompts:check>
#pragma omp ordered
              my_islarger = check_i_islarger(i)&&my_islarger;
              </ompts:check>
            } /* end of for */
        }
        /* combine each thread's verdict into the shared result */
#pragma omp critical
        is_larger = is_larger && my_islarger;
        </ompts:orphan>
    }
    return (is_larger);
}
</ompts:testcode>
</ompts:test>
Fig_12.9_nestedOpenMP.c
#include <omp.h>
#include <stdio.h>

/* Report the size of the current team at the given nesting level.
 * 'single' ensures only one thread per team prints. */
void report_num_threads(int level)
{
    #pragma omp single
    {
        printf("Level %d: number of threads in the team: %d\n", \
              level, omp_get_num_threads());
    }
}

/* Demonstrates three levels of nested parallel regions, each requesting
 * teams of two threads (so levels report 2, 2x2 and 2x2x2 teams). */
int main()
{
    omp_set_dynamic(0);
    /* BUGFIX: nested parallelism is disabled by default; without enabling
     * it the two inner parallel regions execute with one-thread teams and
     * the example never demonstrates nesting. */
    omp_set_nested(1);
    #pragma omp parallel num_threads(2)
    {
        report_num_threads(1);
        #pragma omp parallel num_threads(2)
        {
            report_num_threads(2);
            #pragma omp parallel num_threads(2)
            {
                report_num_threads(3);
            }
        }
    }
    return(0);
}
GB_unaryop__minv_fp64_int16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_fp64_int16
// op(A') function:  GB_tran__minv_fp64_int16

// C type:   double
// A type:   int16_t
// cast:     double cij = (double) aij
// unaryop:  cij = 1./aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: multiplicative inverse.  Note that an int16 aij of zero
// casts to 0.0, and 1./0. produces +Inf under IEEE-754 semantics.
#define GB_OP(z, x) \
    z = 1./x ;

// casting (int16_t value widened to double before the inverse is taken)
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                          \
    /* aij = Ax [pA] */                    \
    GB_GETA (aij, Ax, pA) ;                \
    /* Cx [pC] = op (cast (aij)) */        \
    GB_CASTING (x, aij) ;                  \
    GB_OP (GB_CX (pC), x) ;                \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_FP64 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over the anz entries; each iteration is independent, so the
// loop is statically scheduled across nthreads.
GrB_Info GB_unop__minv_fp64_int16
(
    double *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is generated by including GB_unaryop_transpose.c with the
// macros above defining the type-specific pieces.
GrB_Info GB_tran__minv_fp64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
quatLibIDLWrap.c
/*
 * cquat.c
 * cquat_multiply
 *
 * Created by dave on 3/30/11.
 * Copyright 2011 Naval Research Lab. All rights reserved.
 */

#include "quatLibIDLWrap.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "idl_export.h"
#include "quatlib.h"

#ifdef _OPENMP
#include <omp.h>
#define OPENMPLIMIT 10000000l
#endif

/* #########################################################################
   IDL CALL_EXTERNAL glue for quaternion multiplication.  The wrapper reads
   the arguments from IDL, casts the argc/*argv[] entries to their proper
   types, and dispatches to a double or float loop.  In the float case each
   quaternion is temporarily promoted to double, the multiplication is done
   in double precision, and the result is cast back to float.  The loops
   are multithreaded with OpenMP.
   ######################################################################### */

/* Multiply arrays of double-precision quaternions element-wise:
 * q1q2[i] = q1[i]*q2[i].  When one input holds a single quaternion it is
 * broadcast against every element of the other by giving it stride 0.
 *   q1, q2    input quaternions, 4 doubles each (real, x, y, z)
 *   q1q2      output array, 4 doubles per product
 *   nq1, nq2  number of quaternions in q1 / q2
 *   p         passive/active switch forwarded to quatMultiply()
 * Always returns 1. */
int quatMultLoop(double* q1, double* q2, double* q1q2,
                 unsigned long long nq1, unsigned long long nq2, int p) {
    unsigned long long i, q1step, q2step, q1q2step, nq;
    int trash;   /* quatMultiply's status is intentionally discarded */

    q1q2step = 4;
    q1step = (nq1 == 1) ? 0 : 4;   /* stride 0 == broadcast single quat */
    q2step = (nq2 == 1) ? 0 : 4;
    if ((nq1 > 1) && (nq2 > 1)) {
        nq = (nq1 <= nq2) ? nq1 : nq2;   /* two arrays: stop at the shorter */
    } else {
        nq = (nq1 >= nq2) ? nq1 : nq2;   /* broadcasting: run over the longer */
    }

#ifdef _OPENMP
    /* Only spin up threads when the workload amortizes the overhead. */
    if (nq >= OPENMPLIMIT) { omp_set_num_threads(omp_get_num_procs()); }
    else                   { omp_set_num_threads(1); }
#endif

#pragma omp parallel shared(q1, q2, q1q2) private(i, trash) firstprivate(q1step, q2step, q1q2step, nq, p)
    {
#pragma omp for schedule (static) nowait
        for (i = 0; i < nq; i++) {
            trash = quatMultiply(&(q1[i*q1step]), &(q2[i*q2step]),
                                 &(q1q2[i*q1q2step]), p);
        }
    }
    return 1;
}

/* Float variant of quatMultLoop: same broadcasting rules, but each
 * quaternion is promoted to double, multiplied in double precision, and
 * the product is stored back as float.
 * (Cleanup: removed the unused local 'istep'.) */
int quatMultLoopf(float* q1, float* q2, float* q1q2,
                  unsigned long long nq1, unsigned long long nq2, int p) {
    unsigned long long i, j, q1step, q2step, q1q2step, nq;
    int trash;
    double q1temp[4], q2temp[4], q1q2temp[4];

    q1q2step = 4;
    q1step = (nq1 == 1) ? 0 : 4;
    q2step = (nq2 == 1) ? 0 : 4;
    if ((nq1 > 1) && (nq2 > 1)) {
        nq = (nq1 <= nq2) ? nq1 : nq2;
    } else {
        nq = (nq1 >= nq2) ? nq1 : nq2;
    }

#ifdef _OPENMP
    if (nq >= OPENMPLIMIT) { omp_set_num_threads(omp_get_num_procs()); }
    else                   { omp_set_num_threads(1); }
#endif

    if ((nq1 == 1) && (nq > 1)) {
        /* q1 broadcast: promote it once per thread, outside the loop */
#pragma omp parallel shared(q1, q2, q1q2) private(i,j, trash, q1temp, q2temp, q1q2temp) \
    firstprivate(q1step, q2step, q1q2step, nq, p)
        {
            for (j = 0; j < 4; j++) q1temp[j] = (double) q1[j];
#pragma omp for schedule (static) nowait
            for (i = 0; i < nq; i++) {
                for (j = 0; j < 4; j++) q2temp[j] = (double) q2[i*q2step+j];
                trash = quatMultiply(q1temp, q2temp, q1q2temp, p);
                for (j = 0; j < 4; j++) q1q2[i*q1q2step+j] = (float) q1q2temp[j];
            }
        }
    } else if ((nq2 == 1) && nq > 1) {
        /* q2 broadcast: promote it once per thread, outside the loop */
#pragma omp parallel shared(q1, q2, q1q2) private(i,j, trash, q1temp, q2temp, q1q2temp) \
    firstprivate(q1step, q2step, q1q2step, nq, p)
        {
            for (j = 0; j < 4; j++) q2temp[j] = (double) q2[j];
#pragma omp for schedule (static) nowait
            for (i = 0; i < nq; i++) {
                for (j = 0; j < 4; j++) q1temp[j] = (double) q1[i*q1step+j];
                trash = quatMultiply(q1temp, q2temp, q1q2temp, p);
                for (j = 0; j < 4; j++) q1q2[i*q1q2step+j] = (float) q1q2temp[j];
            }
        }
    } else {
        /* general element-wise case */
#pragma omp parallel shared(q1, q2, q1q2) private(i,j, trash, q1temp, q2temp, q1q2temp) \
    firstprivate(q1step, q2step, q1q2step, nq, p)
        {
#pragma omp for schedule (static) nowait
            for (i = 0; i < nq; i++) {
                for (j = 0; j < 4; j++) {
                    q2temp[j] = (double) q2[i*q2step+j];
                    q1temp[j] = (double) q1[i*q1step+j];
                }
                trash = quatMultiply(q1temp, q2temp, q1q2temp, p);
                for (j = 0; j < 4; j++) q1q2[i*q1q2step+j] = (float) q1q2temp[j];
            }
        }
    }
    return 1;
}

/* Wrapper function for quaternion multiplication, for IDL CALL_EXTERNAL.
 * Quaternions use (real, imaginary x y z) notation.
 *
 * argc must equal 7
 * argv[0] pointer to q1
 * argv[1] pointer to q2
 * argv[2] pointer to q1q2 - output
 * argv[3] number of quaternions in q1
 * argv[4] number of quaternions in q2
 * argv[5] p factor for passive/active switch
 * argv[6] IDL type code for float (4) or double (5)
 */
int quatMultIDLWrap(int argc, void* argv[]) {
    IDL_ULONG64 *nq1In, *nq2In;
    IDL_LONG *typeIn, *pIn;
    unsigned long long nq1, nq2;
    int type, p;

    nq1In = (IDL_ULONG64*) argv[3];
    nq1 = (unsigned long long) (*nq1In);
    nq2In = (IDL_ULONG64*) argv[4];
    nq2 = (unsigned long long) (*nq2In);
    pIn = (IDL_LONG*) argv[5];
    p = (int) (*pIn);
    typeIn = (IDL_LONG*) argv[6];
    type = (int) (*typeIn);

    if (type == 4) {
        /* float storage; calculations still done in double */
        return quatMultLoopf((float*) argv[0], (float*) argv[1], (float*) argv[2],
                             nq1, nq2, p);
    } else {
        /* double calculation and storage */
        return quatMultLoop((double*) argv[0], (double*) argv[1], (double*) argv[2],
                            nq1, nq2, p);
    }
}

/* #########################################################################
   IDL CALL_EXTERNAL glue for the quaternion/vector operator, structured
   exactly like the multiplication wrappers above: argument unpacking, a
   double loop, and a float loop that promotes to double internally.
   ######################################################################### */

/* Apply quaternions to 3-vectors element-wise: vout[i] = q[i] (*) v[i],
 * with the same zero-stride broadcasting as quatMultLoop.
 *   q     quaternions, 4 doubles each
 *   v     vectors, 3 doubles each
 *   vout  output vectors, 3 doubles each
 *   p     passive/active switch forwarded to quatVector() */
int quatVectLoop(double* q, double* v, double* vout,
                 unsigned long long nq, unsigned long long nv, int p) {
    unsigned long long i, qstep, vstep, voutstep, n;
    int trash;

    voutstep = 3;
    qstep = (nq == 1) ? 0 : 4;
    vstep = (nv == 1) ? 0 : 3;
    if ((nq > 1) && (nv > 1)) {
        n = (nq <= nv) ? nq : nv;
    } else {
        n = (nq >= nv) ? nq : nv;
    }

#ifdef _OPENMP
    if (n >= OPENMPLIMIT) { omp_set_num_threads(omp_get_num_procs()); }
    else                  { omp_set_num_threads(1); }
#endif

#pragma omp parallel shared(q, v, vout) private(i, trash) firstprivate(qstep, vstep, voutstep, n,p)
    {
#pragma omp for schedule (static) nowait
        for (i = 0; i < n; i++) {
            trash = quatVector(&(q[i*qstep]), &(v[i*vstep]),
                               &(vout[i*voutstep]), p);
        }
    }
    return 1;
}

/* Float variant of quatVectLoop: promote to double, operate, store float.
 * (Cleanup: removed the unused local 'istep'.) */
int quatVectLoopf(float* q, float* v, float* vout,
                  unsigned long long nq, unsigned long long nv, int p) {
    unsigned long long i, j, qstep, vstep, voutstep, n;
    int trash;
    double qtemp[4], vtemp[3], vouttemp[3];

    voutstep = 3;
    qstep = (nq == 1) ? 0 : 4;
    vstep = (nv == 1) ? 0 : 3;
    if ((nq > 1) && (nv > 1)) {
        n = (nq <= nv) ? nq : nv;
    } else {
        n = (nq >= nv) ? nq : nv;
    }

#ifdef _OPENMP
    if (n >= OPENMPLIMIT) { omp_set_num_threads(omp_get_num_procs()); }
    else                  { omp_set_num_threads(1); }
#endif

    if ((nq == 1) && (n > 1)) {
        /* q broadcast: promote it once per thread, outside the loop */
#pragma omp parallel shared(q, v, vout) private(i,j, trash, qtemp, vtemp, vouttemp) \
    firstprivate(qstep, vstep, voutstep, n, p)
        {
            for (j = 0; j < 4; j++) qtemp[j] = (double) q[j];
#pragma omp for schedule (static) nowait
            for (i = 0; i < n; i++) {
                for (j = 0; j < 3; j++) vtemp[j] = (double) v[i*vstep+j];
                trash = quatVector(qtemp, vtemp, vouttemp, p);
                for (j = 0; j < 3; j++) vout[i*voutstep+j] = (float) vouttemp[j];
            }
        }
    } else if ((nv == 1) && n > 1) {
        /* v broadcast: promote it once per thread, outside the loop */
#pragma omp parallel shared(q, v, vout) private(i,j, trash, qtemp, vtemp, vouttemp) \
    firstprivate(qstep, vstep, voutstep, n, p)
        {
            for (j = 0; j < 3; j++) vtemp[j] = (double) v[j];
#pragma omp for schedule (static) nowait
            for (i = 0; i < n; i++) {
                for (j = 0; j < 4; j++) qtemp[j] = (double) q[i*qstep+j];
                trash = quatVector(qtemp, vtemp, vouttemp, p);
                for (j = 0; j < 3; j++) vout[i*voutstep+j] = (float) vouttemp[j];
            }
        }
    } else {
        /* general element-wise case */
#pragma omp parallel shared(q, v, vout) private(i,j, trash, qtemp, vtemp, vouttemp) \
    firstprivate(qstep, vstep, voutstep, n, p)
        {
#pragma omp for schedule (static) nowait
            for (i = 0; i < n; i++) {
                for (j = 0; j < 3; j++) {
                    vtemp[j] = (double) v[i*vstep+j];
                }
                for (j = 0; j < 4; j++) {
                    qtemp[j] = (double) q[i*qstep+j];
                }
                trash = quatVector(qtemp, vtemp, vouttemp, p);
                for (j = 0; j < 3; j++) vout[i*voutstep+j] = (float) vouttemp[j];
            }
        }
    }
    return 1;
}

/* Wrapper function for the quaternion/vector operator, for IDL
 * CALL_EXTERNAL.  Quaternions use (real, imaginary x y z) notation.
 *
 * argc must equal 7
 * argv[0] pointer to q
 * argv[1] pointer to v (the vector/s)
 * argv[2] pointer to vout - output
 * argv[3] number of quaternions in q
 * argv[4] number of vectors in v
 * argv[5] p factor for passive/active switch
 * argv[6] IDL type code for float (4) or double (5)
 */
int quatVectIDLWrap(int argc, void* argv[]) {
    IDL_ULONG64 *nqIn, *nvIn;
    IDL_LONG *typeIn, *pIn;
    unsigned long long nq, nv;
    int type, p;

    nqIn = (IDL_ULONG64*) argv[3];
    nq = (unsigned long long) (*nqIn);
    nvIn = (IDL_ULONG64*) argv[4];
    nv = (unsigned long long) (*nvIn);
    pIn = (IDL_LONG*) argv[5];
    p = (int) (*pIn);
    typeIn = (IDL_LONG*) argv[6];
    type = (int) (*typeIn);

    if (type == 4) {
        /* float storage; calculations still done in double */
        return quatVectLoopf((float*) argv[0], (float*) argv[1], (float*) argv[2],
                             nq, nv, p);
    } else {
        /* double calculation and storage */
        return quatVectLoop((double*) argv[0], (double*) argv[1], (double*) argv[2],
                            nq, nv, p);
    }
}

/* No-op unload hook required by the IDL DLM interface. */
int quatLibUnload(int argc, void* argv[]) {
    return 1;
}
GB_subassign_00.c
//------------------------------------------------------------------------------ // GB_subassign_00: C(I,J)<!,repl> = empty ; using S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // Method 00: C(I,J)<!,repl> = empty ; using S // M: NULL // Mask_comp: true // C_replace: true // accum: any (present or not; result is the same) // A: any (scalar or matrix; result is the same) // S: constructed #include "GB_subassign_methods.h" GrB_Info GB_subassign_00 ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], const GrB_Matrix S, GB_Context Context ) { //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- int64_t *restrict Ci = C->i ; const int64_t *restrict Sx = S->x ; //-------------------------------------------------------------------------- // Method 00: C(I,J)<!,repl> = empty ; using S //-------------------------------------------------------------------------- // Time: Optimal, O(nnz(S)), assuming S has already been constructed. //-------------------------------------------------------------------------- // Parallel: all entries in S can be processed fully in parallel. //-------------------------------------------------------------------------- // All entries in C(I,J) are deleted. The result does not depend on A or // the scalar. 
int64_t snz = GB_NNZ (S) ; GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (snz, chunk, nthreads_max) ; int64_t nzombies = C->nzombies ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(+:nzombies) for (int64_t pS = 0 ; pS < snz ; pS++) { // S (inew,jnew) is a pointer back into C (I(inew), J(jnew)) int64_t pC = Sx [pS] ; int64_t i = Ci [pC] ; // ----[X A 0] or [X . 0]----------------------------------------------- // action: ( X ): still a zombie // ----[C A 0] or [C . 0]----------------------------------------------- // action: C_repl: ( delete ): becomes a zombie if (!GB_IS_ZOMBIE (i)) { nzombies++ ; Ci [pC] = GB_FLIP (i) ; } } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- C->nzombies = nzombies ; return (GrB_SUCCESS) ; }
point_outlier.h
/**************************************************************************** * VCGLib o o * * Visual and Computer Graphics Library o o * * _ O _ * * Copyright(C) 2004-2016 \/)\/ * * Visual Computing Lab /\/| * * ISTI - Italian National Research Council | * * \ * * All rights reserved. * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License (http://www.gnu.org/licenses/gpl.txt) * * for more details. * * * ****************************************************************************/ #ifndef VCG_TRI_OUTLIERS__H #define VCG_TRI_OUTLIERS__H #include <vcg/space/index/kdtree/kdtree.h> namespace vcg { namespace tri { template <class MeshType> class OutlierRemoval { public: typedef typename MeshType::ScalarType ScalarType; typedef typename vcg::KdTree<ScalarType> KdTreeType; typedef typename vcg::KdTree<ScalarType>::PriorityQueue PriorityQueue; /** Compute an outlier probability value for each vertex of the mesh using the approch in the paper "LoOP: Local Outlier Probabilities". The outlier probability is stored in the vertex attribute "outlierScore". It use the input kdtree to find the kNearest of each vertex. "LoOP: local outlier probabilities" by Hans-Peter Kriegel et al. 
Proceedings of the 18th ACM conference on Information and knowledge management */ static void ComputeLoOPScore(MeshType& mesh, KdTreeType& kdTree, int kNearest) { vcg::tri::RequireCompactness(mesh); typename MeshType::template PerVertexAttributeHandle<ScalarType> outlierScore = tri::Allocator<MeshType>:: template GetPerVertexAttribute<ScalarType>(mesh, std::string("outlierScore")); typename MeshType::template PerVertexAttributeHandle<ScalarType> sigma = tri::Allocator<MeshType>:: template GetPerVertexAttribute<ScalarType>(mesh, std::string("sigma")); typename MeshType::template PerVertexAttributeHandle<ScalarType> plof = tri::Allocator<MeshType>:: template GetPerVertexAttribute<ScalarType>(mesh, std::string("plof")); #pragma omp parallel for schedule(dynamic, 10) //MSVC supports only OMP 2 -> no unsigned int allowed in parallel for... for (int i = 0; i < (int)mesh.vert.size(); i++) { PriorityQueue queue; kdTree.doQueryK(mesh.vert[i].cP(), kNearest, queue); ScalarType sum = 0; for (int j = 0; j < queue.getNofElements(); j++) sum += queue.getWeight(j); sum /= (queue.getNofElements()); sigma[i] = sqrt(sum); } float mean = 0; #pragma omp parallel for reduction(+: mean) schedule(dynamic, 10) for (int i = 0; i < (int)mesh.vert.size(); i++) { PriorityQueue queue; kdTree.doQueryK(mesh.vert[i].cP(), kNearest, queue); ScalarType sum = 0; for (int j = 0; j < queue.getNofElements(); j++) sum += sigma[queue.getIndex(j)]; sum /= (queue.getNofElements()); plof[i] = sigma[i] / sum - 1.0f; mean += plof[i] * plof[i]; } mean /= mesh.vert.size(); mean = sqrt(mean); #pragma omp parallel for schedule(dynamic, 10) for (int i = 0; i < (int)mesh.vert.size(); i++) { ScalarType value = plof[i] / (mean * sqrt(2.0f)); double dem = 1.0 + 0.278393 * value; dem += 0.230389 * value * value; dem += 0.000972 * value * value * value; dem += 0.078108 * value * value * value * value; ScalarType op = std::max(0.0, 1.0 - 1.0 / dem); outlierScore[i] = op; } 
tri::Allocator<MeshType>::DeletePerVertexAttribute(mesh, std::string("sigma")); tri::Allocator<MeshType>::DeletePerVertexAttribute(mesh, std::string("plof")); }; /** Select all the vertex of the mesh with an outlier probability above the input threshold [0.0, 1.0]. */ static int SelectLoOPOutliers(MeshType& mesh, KdTreeType& kdTree, int kNearest, float threshold) { ComputeLoOPScore(mesh, kdTree, kNearest); int count = 0; typename MeshType:: template PerVertexAttributeHandle<ScalarType> outlierScore = tri::Allocator<MeshType>::template GetPerVertexAttribute<ScalarType>(mesh, std::string("outlierScore")); for (int i = 0; i < mesh.vert.size(); i++) { if (outlierScore[i] > threshold) { mesh.vert[i].SetS(); count++; } } return count; } /** Delete all the vertex of the mesh with an outlier probability above the input threshold [0.0, 1.0]. */ static int DeleteLoOPOutliers(MeshType& m, KdTreeType& kdTree, int kNearest, float threshold) { SelectLoOPOutliers(m,kdTree,kNearest,threshold); int ovn = m.vn; for(typename MeshType::VertexIterator vi=m.vert.begin();vi!=m.vert.end();++vi) if((*vi).IsS() ) tri::Allocator<MeshType>::DeleteVertex(m,*vi); tri::Allocator<MeshType>::CompactVertexVector(m); tri::Allocator<MeshType>::DeletePerVertexAttribute(m, std::string("outlierScore")); return m.vn - ovn; } }; } // end namespace tri } // end namespace vcg #endif // VCG_TRI_OUTLIERS_H
WrapOpenMP.h
/** * @file WrapOpenMP.h * @author F. Gratl * @date 4/20/18 * * @details * Provide non-OpenMP versions of the most common OpenMP function calls, * so that they don't have to be wrapped in ifdef-s every time. * * Proper wrapper and renaming necessary, because of -fopenmp-simd handling of * gcc. * * Extend when necessary. */ #pragma once #if defined(AUTOPAS_OPENMP) #include <omp.h> #include <vector> #else #include "ExceptionHandler.h" #endif namespace autopas { #if defined(AUTOPAS_OPENMP) /** * Wrapper for omp_get_thread_num(). * @return Id of the current thread. */ inline int autopas_get_thread_num() { return omp_get_thread_num(); } /** * Wrapper for omp_get_num_thread(). * @return Number of currently active threads. */ inline int autopas_get_num_threads() { return omp_get_num_threads(); } /** * Wrapper for omp_get_max_threads(). * @return Number of threads that can be activated. */ inline int autopas_get_max_threads() { return omp_get_max_threads(); } /** * AutoPasLock for the openmp case, this wraps a omp_lock_t object. To make it copyable, etc. */ class AutoPasLock { public: /** * Default constructor */ AutoPasLock() { omp_init_lock(&_lock); } /** * Move Constructor */ AutoPasLock(AutoPasLock &&) noexcept { omp_init_lock(&_lock); } /** * Copy constructor */ AutoPasLock(const AutoPasLock &) { omp_init_lock(&_lock); } /** * Assignment operator * @return reference to this object after copy */ AutoPasLock &operator=(AutoPasLock) = delete; /** * Destructor */ ~AutoPasLock() { omp_destroy_lock(&_lock); } /** * Acquire the lock. */ void lock() { omp_set_lock(&_lock); } /** * Release the lock. 
*/ void unlock() { omp_unset_lock(&_lock); } private: omp_lock_t _lock; }; /** * Custom reductions: */ // reduction for merging vectors: {1,2} + {2,3} -> {1,2,2,3} #pragma omp declare reduction(vecMerge : std::vector<size_t> : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end())) #pragma omp declare reduction(vecMerge : std::vector<double> : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end())) #else /** * Dummy for omp_set_lock() when no OpenMP is available. * @return Always 0. */ inline int autopas_get_thread_num() { return 0; } /** * Dummy for omp_get_num_threads() when no OpenMP is available. * @return Always 1. */ inline int autopas_get_num_threads() { return 1; } /** * Dummy for omp_get_max_threads() when no OpenMP is available. * @return Always 1. */ inline int autopas_get_max_threads() { return 1; } /** * AutoPasLock for the sequential case. */ class AutoPasLock { public: /** * Default constructor */ AutoPasLock() { _locked = false; } /** * Move Constructor */ AutoPasLock(AutoPasLock &&) noexcept { _locked = false; } /** * Copy constructor */ AutoPasLock(AutoPasLock &) { _locked = false; } /** * Assignment operator * @return reference to this object after copy */ AutoPasLock &operator=(AutoPasLock) = delete; /** * Destructor */ ~AutoPasLock() { if (_locked) { utils::ExceptionHandler::exception("AutoPasLocked destroyed in locked state."); } } /** * Acquire the lock. */ void lock() { if (_locked) { utils::ExceptionHandler::exception("Tried to acquire a locked lock."); } _locked = true; } /** * Release the lock. */ void unlock() { if (not _locked) { utils::ExceptionHandler::exception("Tried to release an unlocked lock."); } _locked = false; } private: // true if locked, false if unlocked bool _locked; }; #endif // These properties are needed because we use AutoPasLock in vectors on which we call resize(). 
static_assert(std::is_default_constructible_v<AutoPasLock>, "AutoPasLock needs to be default constructible!"); static_assert(std::is_move_constructible_v<AutoPasLock>, "AutoPasLock needs to be move constructible!"); } // namespace autopas
GB_binop__iseq_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):      GB (_AaddB__iseq_uint16)
// A.*B function (eWiseMult):    GB (_AemultB_01__iseq_uint16)
// A.*B function (eWiseMult):    GB (_AemultB_02__iseq_uint16)
// A.*B function (eWiseMult):    GB (_AemultB_03__iseq_uint16)
// A.*B function (eWiseMult):    GB (_AemultB_bitmap__iseq_uint16)
// A*D function (colscale):      GB (_AxD__iseq_uint16)
// D*A function (rowscale):      GB (_DxB__iseq_uint16)
// C+=B function (dense accum):  GB (_Cdense_accumB__iseq_uint16)
// C+=b function (dense accum):  GB (_Cdense_accumb__iseq_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__iseq_uint16)
// C=scalar+B     GB (_bind1st__iseq_uint16)
// C=scalar+B'    GB (_bind1st_tran__iseq_uint16)
// C=A+scalar     GB (_bind2nd__iseq_uint16)
// C=A'+scalar    GB (_bind2nd_tran__iseq_uint16)

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t

// BinaryOp: cij = (aij == bij)
// Note: ISEQ stores the boolean result of (aij == bij) as a uint16_t
// (1 if equal, 0 otherwise), as defined by GB_BINOP below.

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISEQ || GxB_NO_UINT16 || GxB_NO_ISEQ_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISEQ is none of these, so this kernel is compiled out (hence the #if 0).

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__iseq_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__iseq_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__iseq_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // (unreachable; quirk of the code generator)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__iseq_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__iseq_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__iseq_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__iseq_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__iseq_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__iseq_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__iseq_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__iseq_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__iseq_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x == aij) ;                      \
}

GrB_Info GB (_bind1st_tran__iseq_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij == y) ;                      \
}

GrB_Info GB (_bind2nd_tran__iseq_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__plus_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__plus_int64) // A.*B function (eWiseMult): GB (_AemultB_08__plus_int64) // A.*B function (eWiseMult): GB (_AemultB_02__plus_int64) // A.*B function (eWiseMult): GB (_AemultB_04__plus_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_int64) // A*D function (colscale): GB (_AxD__plus_int64) // D*A function (rowscale): GB (_DxB__plus_int64) // C+=B function (dense accum): GB (_Cdense_accumB__plus_int64) // C+=b function (dense accum): GB (_Cdense_accumb__plus_int64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_int64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_int64) // C=scalar+B GB (_bind1st__plus_int64) // C=scalar+B' GB (_bind1st_tran__plus_int64) // C=A+scalar GB (_bind2nd__plus_int64) // C=A'+scalar GB (_bind2nd_tran__plus_int64) // C type: int64_t // A type: int64_t // A pattern? 0 // B type: int64_t // B pattern? 
0 // BinaryOp: cij = (aij + bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x + y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PLUS || GxB_NO_INT64 || GxB_NO_PLUS_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__plus_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__plus_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__plus_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__plus_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__plus_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; 
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__plus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__plus_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are read only for the eWiseUnion variant
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__plus_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__plus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__plus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__plus_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__plus_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present per the bitmap
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x + bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__plus_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij + y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    int64_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (x + aij) ;                   \
}

GrB_Info GB (_bind1st_tran__plus_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE after the template include
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    int64_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (aij + y) ;                   \
}

GrB_Info GB (_bind2nd_tran__plus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Host.c
#include "heat3d.h"

#define checkCuda(error) __checkCuda(error, __FILE__, __LINE__)

////////////////////////////////////////////////////////////////////////////////
// A method for checking error in CUDA calls
////////////////////////////////////////////////////////////////////////////////
// Aborts on a failed CUDA call; compiles to a no-op unless DEBUG/_DEBUG is set.
inline void __checkCuda(cudaError_t error, const char *file, const int line)
{
#if defined(DEBUG) || defined(_DEBUG)
	if (error != cudaSuccess)
	{
		// NOTE(review): the message comes from cudaGetLastError(), which may
		// report a different (later) error than the `error` argument — confirm.
		printf("checkCuda error at %s:%i: %s\n", file, line, cudaGetErrorString(cudaGetLastError()));
		exit(-1);
	}
#endif
	return;
}

////////////////////////////////////////////////////////////////////////////////
// Kernel for computing 3D Heat equation on the CPU
////////////////////////////////////////////////////////////////////////////////
// Jacobi-style 7-point update on an (Nx+2)x(Ny+2)x(Nz+2) padded grid; the
// one-cell halo is read but never written.
void cpu_heat3D(REAL * __restrict__ u_new, REAL * __restrict__ u_old, const REAL c0, const REAL c1, const unsigned int max_iters, const unsigned int Nx, const unsigned int Ny, const unsigned int Nz)
{
	unsigned int j_off = (Nx+2);           // stride between consecutive j rows
	unsigned int k_off = j_off * (Ny+2);   // stride between consecutive k planes

	#pragma omp parallel default(shared)
	{
		for(unsigned int iterations = 0; iterations < max_iters; iterations++)
		{
			#pragma omp for schedule(static)
			for (unsigned int k = 1; k < (Nz+2)-1; k++)
			{
				for (unsigned int j = 1; j < (Ny+2)-1; j++)
				{
					for (unsigned int i = 1; i < (Nx+2)-1; i++)
					{
						unsigned int idx = i + j*j_off + k*k_off;
						u_new[idx] = c1 * u_old[idx] + c0 * (u_old[idx-1] + u_old[idx+1] + u_old[idx-j_off] + u_old[idx+j_off] + u_old[idx-k_off] + u_old[idx+k_off]);
					}
				}
			}
			// One thread swaps the buffer roles; the implicit barriers of
			// `omp for` (above) and `omp single` keep iterations ordered.
			#pragma omp single
			{
				swap(REAL*, u_old, u_new);
			}
		}
	}
}

//////////////////////
// Initializes arrays
//////////////////////
// Zeroes the halo cells; fills the interior with INITIAL_DISTRIBUTION.
void init(REAL *u_old, REAL *u_new, const REAL h, unsigned int Nx, unsigned int Ny, unsigned int Nz)
{
	unsigned int j_off = (Nx+2);
	unsigned int k_off = j_off * (Ny+2);

	for(unsigned int k = 0; k < (Nz+2); k++)
	{
		for (unsigned int j = 0; j < (Ny+2); j++)
		{
			for (unsigned int i = 0; i < (Nx+2); i++)
			{
				unsigned int idx = i + j*j_off + k*k_off;
				// halo (boundary) cells get 0, interior gets the analytic profile
				if (i==0 || i==(Nx+2)-1 || j==0 || j==(Ny+2)-1|| k==0 || k==(Nz+2)-1)
				{
					u_old[idx] = 0.;
					u_new [idx] = 0.;
				}
				else
				{
					u_old[idx] = INITIAL_DISTRIBUTION(i, j, k, h);
					u_new[idx] = INITIAL_DISTRIBUTION(i, j, k, h);
				}
			}
		}
	}
}

////////////////////////////////////////////////////////////////////////////////
// Initialize the sub-domains
////////////////////////////////////////////////////////////////////////////////
// Copies slab i (thickness _Nz planes, plus halos) out of the full domain
// h_uold into the per-device buffer h_s_uold.
void init_subdomain(REAL *h_s_uold, REAL *h_uold, unsigned int Nx, unsigned int Ny, unsigned int _Nz, unsigned int i)
{
	int idx3d = 0, idx_sd = 0;

	for(unsigned int z = 0; z < _Nz+2; z++)
	{
		for (unsigned int y = 0; y < Ny+2; y++)
		{
			for (unsigned int x = 0; x < Nx+2; x++)
			{
				idx3d = x + y*(Nx+2) + (z+i*_Nz)*(Nx+2)*(Ny+2);
				idx_sd = x + y*(Nx+2) + z*(Nx+2)*(Ny+2);
				h_s_uold[idx_sd] = h_uold[idx3d];
			}
		}
	}
}

////////////////////////////////////////////////////////////////////////////////
// Merges the smaller sub-domains into a larger domain
////////////////////////////////////////////////////////////////////////////////
// Inverse of init_subdomain: writes back interior points only (halos skipped).
void merge_domains(REAL *h_s_Uold, REAL *h_Uold, int Nx, int Ny, int _Nz, const int i)
{
	int idx3d = 0, idx_sd = 0;

	for(int z = 1; z < _Nz+1; z++)
	{
		for (int y = 1; y < Ny+1; y++)
		{
			for (int x = 1; x < Nx+1; x++)
			{
				idx3d = x + y*(Nx+2) + (z+i*_Nz)*(Nx+2)*(Ny+2);
				idx_sd = x + y*(Nx+2) + z*(Nx+2)*(Ny+2);
				h_Uold[idx3d] = h_s_Uold[idx_sd];
			}
		}
	}
}

////////////////////////////////////////////////////////////////////////////////
// A method that calculates the GFLOPS
////////////////////////////////////////////////////////////////////////////////
// FLOPS is a project-defined per-point flop count (heat3d.h).
float CalcGflops(float computeTimeInSeconds, unsigned int iterations, unsigned int nx, unsigned int ny, unsigned int nz)
{
	return iterations*(double)((nx * ny * nz) * 1e-9 * FLOPS)/computeTimeInSeconds;
}

////////////////////////////////////////////////////////////////////////////////
// Calculates the error/L2 norm
////////////////////////////////////////////////////////////////////////////////
void CalcError(REAL *uOld, REAL
*uNew, const REAL t, const REAL h, unsigned int nx, unsigned int ny, unsigned int nz) { unsigned int j_off = (nx+2); unsigned int k_off = j_off*(ny+2); double error = 0., l2_uold = 0., l2_unew = 0., l2_error = 0.; for (unsigned int k = 1; k <= nz; k++) { for (unsigned int j = 1; j <= ny; j++) { for (unsigned int i = 1; i <= nx; i++) { unsigned int idx = i + j*j_off + k*k_off; REAL analytical = (exp(-3*M_PI*M_PI*t) * INITIAL_DISTRIBUTION(i, j, k, h)) - uOld[idx]; l2_error += analytical * analytical; error += (uOld[idx]-uNew[idx])*(uOld[idx]-uNew[idx]); l2_uold += (uOld[idx])*(uOld[idx]); l2_unew += (uNew[idx])*(uNew[idx]); } } } l2_uold = sqrt(l2_uold/(nx*ny*nz)); l2_unew = sqrt(l2_unew/(nx*ny*nz)); l2_error = sqrt(l2_error*h*h*h); printf("RMS diff : %e\n", sqrt(error/(nx*ny*nz))); printf("L2 norm (GPU) : %e\n", l2_uold); printf("L2 norm (CPU) : %e\n", l2_unew); printf("L2 error : %e\n", l2_error); } //////////////////////////////////////////////////////////////////////////////// // Prints experiment summary //////////////////////////////////////////////////////////////////////////////// void PrintSummary(const char* kernelName, const char* optimization, double computeTimeInSeconds, double hostToDeviceTimeInSeconds, double deviceToHostTimeInSeconds, float gflops, const int computeIterations, const int nx) { printf("===========================%s=======================\n", kernelName); printf("Optimization : %s\n", optimization); printf("Kernel time ex. 
data transfers : %lf seconds\n", computeTimeInSeconds); printf("Data transfer(s) HtD : %lf seconds\n", hostToDeviceTimeInSeconds); printf("Data transfer DtH : %lf seconds\n", deviceToHostTimeInSeconds); printf("===================================================================\n"); printf("Total effective GFLOPs : %lf\n", gflops); printf("===================================================================\n"); printf("3D Grid Size : %d\n", nx); printf("Iterations : %d\n", computeIterations); printf("===================================================================\n"); } //////////////////////////////////////////////////////////////////////////////// // Prints a flattened 3D array //////////////////////////////////////////////////////////////////////////////// void print3D(REAL *T, const unsigned int Nx, const unsigned int Ny, const unsigned int Nz) { for(unsigned int z = 0; z < Nz+2; z++) { for (unsigned int y = 0; y < Ny+2; y++) { for (unsigned int x = 0; x < Nx+2; x++) { unsigned int idx3d = x + y*(Nx+2) + z*(Nx+2)*(Ny+2); printf("%8.2f", T[idx3d]); } printf("\n"); } printf("\n"); } } //////////////////////////////////////////////////////////////////////////////// // Prints a flattened 2D array //////////////////////////////////////////////////////////////////////////////// void print2D(REAL *T, const unsigned int Nx, const unsigned int Ny) { for (unsigned int y = 0; y < Ny+2; y++) { for (unsigned int x = 0; x < Nx+2; x++) { unsigned int idx = y * Nx+2 + x; printf("%8.2f", T[idx]); } printf("\n"); } printf("\n"); }
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 4; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
   Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
   Unicode 6.0. */
/* We do not support C11 <threads.h>.  */
    // NOTE(review): the license text above is a stray stdc-predef.h header
    // pasted into main() by the source-to-source tool; harmless (comments).
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    // Auto-generated time-tiled loop nest (PLUTO/CLooG); t1..t8 are tile and
    // point coordinates; do not hand-edit the bounds.
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,3);t1++) {
        lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
        ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(max(1,ceild(24*t2-Nz+9,4)),3*t1+1),6*t1-6*t2+2);t3<=min(min(min(floord(4*Nt+Ny-9,4),floord(12*t1+Ny+15,4)),floord(24*t2+Ny+11,4)),floord(24*t1-24*t2+Nz+Ny+13,4));t3++) {
            for (t4=max(max(max(max(0,ceild(3*t1-3*t2-254,256)),ceild(3*t1-510,512)),ceild(24*t2-Nz-2035,2048)),ceild(4*t3-Ny-2035,2048));t4<=min(min(min(min(floord(4*Nt+Nx-9,2048),floord(12*t1+Nx+15,2048)),floord(24*t2+Nx+11,2048)),floord(4*t3+Nx-9,2048)),floord(24*t1-24*t2+Nz+Nx+13,2048));t4++) {
              for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(2048*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),t3-1),512*t4+510);t5++) {
                for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) {
                    lbv=max(2048*t4,4*t5+4);
                    ubv=min(2048*t4+2047,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      // 25-point axis-symmetric stencil: center + offsets 1..4
                      // along each axis, double-buffered over (t5 % 2).
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  // NOTE(review): the top-level A, coef and tile_size pointers are not freed;
  // harmless at program exit but reported by leak checkers.
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);

  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }

  return 0;
}
TAD.h
//
// @author Adam Gibson
//

#ifndef LIBND4J_TAD_H
#define LIBND4J_TAD_H

#include <helpers/shape.h>
#include <pointercast.h>

namespace shape {
    /**
     * TAD = "tensor along dimension": addresses the sub-tensors obtained by
     * fixing all dimensions except the requested ones.
     *
     * Dimension collapse is an algorithm
     * for collapsing singular dimensions.
     * This algorithm will adjust the dimensions
     * wrt the original.
     *
     * The algorithm has 3 components:
     * trailing ones
     * middle ones
     * beginning ones
     *
     * dimensions that are specified to reduce along
     * that are singular should be truncated
     *
     * dimensions that are specified that are singular
     * at the beginning should be removed with middle dimensions
     * decremented.
     *
     * For any time there is a no op, a collapse will
     * set the first dimension to be -1.
     *
     *
     */
    class TAD {
    public:
        int tadIndex = 0;                   // index of the TAD this instance addresses
        int dimensionLength;                // number of dimensions iterated along
        int *dimension = nullptr;
        int *shapeInfo = nullptr;
        int *tadOnlyShapeInfo = nullptr;    // shape-info buffer describing one TAD
        int numTads = 0;
        int tadRank = 0;
        int *tadShape = nullptr;            // points into tadOnlyShapeInfo
        int *tadStride = nullptr;           // points into tadOnlyShapeInfo
        Nd4jIndex *tadOffsets = nullptr;    // per-TAD element offsets (owned)
        int tadOffsetForBlock = 0;
        int rank = 0;
        int numOnes = 0;
        //pointers to original
        int originalDimensionLength;
        int *originalDimension = nullptr;
        int *originalShapeInfo = nullptr;
        bool squeezed = false;
        bool newSqueezeDimensions = false;
        int numOnesInMiddle = 0;
        bool wholeThing = false;            // true when the TAD covers the whole array
        //need to track whether we create a new dimension array or not, we could have just moved the pointer forward
        //due to leading ones
        bool createdNewDimension = false;

        // special case for CUDA, we're passing in __shared__ memory pointers to be used instead of new/malloc
        void *ptrManager = nullptr;
        int *ptrOutput = nullptr;

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF TAD() {}

#ifdef __CUDACC__
        __host__ __device__
#endif
        TAD(int tadIndex,int *shapeInfo,int *dimension,int dimensionLength);

#ifdef __CUDACC__
        __host__ __device__
#endif
        TAD(int *shapeInfo,int *dimension,int dimensionLength);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF void setExternalBuffers(void *ptrManager);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF void setOutputBuffer(int *ptrOutput);

#ifdef __CUDACC__
        __host__ __device__
#endif
        /**
         * This method is for GPU mostly, it allows to initialize TAD instance with precalculated tadOnlyShapeInfo
         */
        INLINEDEF void initWithExternalTAD(int *existingTAD, int *originalShape, int *dimension, int dimensionLength);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF void init(int *shapeInfo,int *dimension,int dimensionLength);

        template <typename T>
#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF void printTADsND(T *x);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF void permuteShapeBufferInPlace(int *shapeBuffer,int *rearrange,int *out);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF int *permuteShapeBuffer(int *shapeBuffer,int *rearrange);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF void createTadOnlyShapeInfo();

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF int lengthPerSlice(int *shapeBuffer);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF int * tad2Sub(int index);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF ~TAD();

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF int* permuteDims();

        /**
         * Compute the tad offset given a dimension.
         *
         * The general pattern for computing a tad offset is as follows:
         * Every $STRIDE that was removed (the first dimension)
         * do a jump by the major stride of the parent array
         * (stride[0] of the parent array)
         *
         * For example given a c ordered 2,2,3,2 with stride 12,6,2,1
         * A tad of dimension 1 will jump 12 every 6 tads.
         *
         * You then end up with offsets of:
         * 0
         * 1
         * 2
         * 3
         * 4
         * 5
         * 12
         * 13
         * 14
         * 15
         * 16
         * 17
         *
         * notice there are 12 tads here. This same incremental jump will happen
         * every time.
         * Note here that by default the
         * stride of element wise stride is used for the hops.
         *
         * Sometimes a jump doesn't happen. If there are less tads
         * than the stride of the dimension you removed, the
         * element wise stride will always be used.
         *
         * For example in a dimension of 0,1, you end up with offsets of:
         * 0,1,2,3,4,5
         *
         * Given that the inner most stride of the dimensions that was removed (1)
         * had a stride of 6, we never need to do a major stride jump.
         *
         */
#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF Nd4jIndex tadOffset(int index);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF int *tensorShape();

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF int * tad2Sub(int index, void *ptrManager);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF void createOffsets();

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF int *shapeInfoOnlyShapeAndStride();

        /**
         * Length of a tad given
         * the shape information
         */
#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF int tadLength(int *shapeInfo, int *dimension, int dimensionLength);

        /**
         * Computes the number
         * of tensors along
         * a given dimension
         */
#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF int tensorsAlongDimension(int *shapeInfo, int *dimension, int dimensionLength);

#ifdef __CUDACC__
        __host__ __device__
        INLINEDEF void createOffsetForBlock(int blockIdx) {
            this->tadOffsetForBlock = this->tadOffset(blockIdx);
        }
#endif

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF void collapse();
    };

    ////

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF TAD::TAD(int tadIndex,int *shapeInfo,int *dimension,int dimensionLength) {
        this->tadIndex = tadIndex;
        this->init(shapeInfo, dimension, dimensionLength);
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF TAD::TAD(int *shapeInfo,int *dimension,int dimensionLength) {
        this->init(shapeInfo, dimension, dimensionLength);
    }

    INLINEDEF void TAD::setExternalBuffers(void *ptrManager) {
        this->ptrManager = ptrManager;
    }

    INLINEDEF void TAD::setOutputBuffer(int *ptrOutput) {
        this->ptrOutput = ptrOutput;
    }

    // Adopt a precalculated TAD shape buffer (GPU path); no ownership taken.
    INLINEDEF void TAD::initWithExternalTAD(int *existingTAD, int *originalShape, int *dimension, int dimensionLength) {
        this->tadOnlyShapeInfo = existingTAD;
        this->rank =
shape::rank(originalShape);
this->originalShapeInfo = originalShape;
this->originalDimension = dimension;
this->originalDimensionLength = dimensionLength;
// working pointers start out aliasing the caller-supplied buffers
this->shapeInfo = originalShape;
this->dimension = dimension;
this->dimensionLength = dimensionLength;
// shape/stride views into the externally supplied TAD buffer
this->tadShape = shape::shapeOf(existingTAD);
this->tadStride = shape::stride(existingTAD);
int ews = shape::elementWiseStride(originalShape);
// number of TADs = total elements / elements per TAD
this->numTads = shape::length(originalShape) / shape::length(existingTAD); // this->tensorsAlongDimension(this->shapeInfo, this->dimension, this->dimensionLength);//shape::length(originalShape) / shape::length(existingTAD);
// "wholeThing": the TAD degenerates to the full buffer, so offsets are trivial
this->wholeThing = this->numTads == 1 || ((this->dimensionLength == this->rank || this->numTads == shape::length(this->shapeInfo)) && ews == 1);
}

// Initialize this TAD from a shape buffer plus the dimensions the TADs run
// along. Keeps both the "original" pointers and the working pointers (which
// collapse() may later rebind) and precomputes numTads / the wholeThing flag.
INLINEDEF void TAD::init(int *shapeInfo,int *dimension,int dimensionLength) {
    this->originalShapeInfo = shapeInfo;
    this->originalDimension = dimension;
    this->originalDimensionLength = dimensionLength;
    //start off as original references
    this->shapeInfo = shapeInfo;
    this->dimensionLength = dimensionLength;
    this->dimension = dimension;
    this->rank = shape::rank(shapeInfo);
    // dimensionLength == 0 means "reduce everything": a single TAD
    this->numTads = dimensionLength == 0 ? 1 : this->tensorsAlongDimension(this->shapeInfo, this->dimension, this->dimensionLength);
    int ews = shape::elementWiseStride(shapeInfo);
    if(!shape::isVector(shapeInfo))
        wholeThing = this->numTads == 1 || ((this->dimensionLength == this->rank || this->numTads == shape::length(shapeInfo)) && ews == 1);
    else if(shape::isScalar(shapeInfo))
        wholeThing = true;
        //vector case
    else {
        // if(dimensionLength == 1 && shape::shapeOf(shapeInfo)[dimension[0]] == 1) {
        // NOTE(review): this compares the *pointer* `dimension` against 0, not
        // dimension[0]; the commented-out line above suggests a per-element
        // check was intended -- confirm before relying on this branch.
        if(dimension == 0 && shape::shapeOf(shapeInfo)[dimension[0]] == 1) {
            wholeThing = true;
        }
    }
}

// Debug helper: print every TAD of x. When the TAD covers the whole buffer we
// can walk it linearly; otherwise each TAD is traversed with the raw-iter
// macros using tadOnlyShapeInfo's shape and stride.
template <typename T>
INLINEDEF void TAD::printTADsND(T *x) {
    if(wholeThing) {
        for(int i = 0; i < shape::length(tadOnlyShapeInfo); i++) {
            printf(" %f ",x[i]);
        }
        printf("\n");
    }
    else {
        for (int i = 0; i < numTads; i++) {
            // NOTE(review): tadOffsets holds Nd4jIndex values; narrowing to
            // int here would overflow for very large buffers -- confirm.
            int offset = tadOffsets[i];
            int shapeIter[MAX_RANK];
            int coord[MAX_RANK];
            int dim;
            int rankIter = shape::rank(tadOnlyShapeInfo);
            int xStridesIter[MAX_RANK];
            T *xPointer = x + offset;
            if (PrepareOneRawArrayIter<T>(rankIter, shape::shapeOf(tadOnlyShapeInfo), xPointer, shape::stride(tadOnlyShapeInfo), &rankIter, shapeIter, &xPointer, xStridesIter) >= 0) {
                ND4J_RAW_ITER_START(dim, shape::rank(tadOnlyShapeInfo), coord, shapeIter); {
                    /* Process the innermost dimension */
                    printf(" %f ",xPointer[0]);
                }
                ND4J_RAW_ITER_ONE_NEXT(dim, rankIter, coord, shapeIter, xPointer, xStridesIter);
                printf("\n");
            }
            else {
                printf("Unable to prepare array\n");
            }
        }
    }
}

// Permute shapeBuffer by rearrange, writing the result into the
// caller-provided buffer `out` (copy first, then permute in place).
INLINEDEF void TAD::permuteShapeBufferInPlace(int *shapeBuffer,int *rearrange,int *out) {
    memcpy(out,shapeBuffer,sizeof(int) * shape::shapeInfoLength(this->rank));
    doPermuteShapeBuffer(this->rank,out,rearrange);
}

// Heap-allocating variant: returns a permuted copy; caller owns it (delete[]).
INLINEDEF int* TAD::permuteShapeBuffer(int *shapeBuffer,int *rearrange) {
    int len = shape::shapeInfoLength(this->rank);
    int *copy = shape::copyOf(len,shapeBuffer);
    doPermuteShapeBuffer(rank,copy,rearrange);
    return copy;
}

// Build tadOnlyShapeInfo (shape+stride describing a single TAD) and refresh
// the cached tadShape/tadStride views so they point into it.
INLINEDEF void TAD::createTadOnlyShapeInfo() {
    this->tadOnlyShapeInfo = this->shapeInfoOnlyShapeAndStride();
    // NOTE(review): tadShape can alias the interior of a shape buffer
    // (shape::shapeOf returns a pointer into it); delete[] of an interior
    // pointer is unsafe -- confirm ownership before trusting this.
    if (this->tadShape != nullptr)
        delete[] this->tadShape;
    this->tadShape = shape::shapeOf(this->tadOnlyShapeInfo);
    this->tadStride = shape::stride(this->tadOnlyShapeInfo);
    /* if(tadIndex > 0) {
           this->createOffsets();
           this->tadOnlyShapeInfo[shape::shapeInfoLength(shape::rank(this->tadOnlyShapeInfo)) - 3] = this->tadOffsets[tadIndex];
       } */
}

// Elements per slice along dimension 0 of shapeBuffer: the product of all
// extents except the first.
INLINEDEF int TAD::lengthPerSlice(int *shapeBuffer) {
    int dimension = 0;
    int *remove = shape::removeIndex(shape::shapeOf(shapeBuffer),&dimension,shape::rank(shapeBuffer),1);
    int prod = shape::prod(remove,shape::rank(shapeBuffer) - 1);
    delete[] remove;
    return prod;
}

// Map a linear TAD index to the coordinates (in the full array) of that TAD's
// first element: coordinates along the TAD dimensions stay 0, the remaining
// ("left over") dimensions are filled in from ind2subC. Caller owns the
// result (delete[]) unless a CUDA UnifiedSharedMemory manager supplied it.
INLINEDEF int * TAD::tad2Sub(int index) {
    int *shape = shape::shapeOf(shapeInfo);
    int rank = shape::rank(shapeInfo);
    int leftOverIndexLen = rank - originalDimensionLength;
#ifdef __CUDACC__
    int *ret;
    int *tadShape;
    int *leftOverIndexes;
    int *sub;
    if (ptrManager != nullptr) {
        UnifiedSharedMemory *manager = (UnifiedSharedMemory *) ptrManager;
        ret = manager->getTempRankBuffer1();
        tadShape = manager->getTempRankBuffer2();
        leftOverIndexes = manager->getTempRankBuffer3();
        sub = manager->getTempRankBuffer4();
    } else {
        ret = new int[rank];
        tadShape = new int[leftOverIndexLen];
        leftOverIndexes = new int[leftOverIndexLen];
        sub = new int[rank];
    }
#else
    int *ret = new int[rank];
    //shape of the tad
    int *tadShape = new int[leftOverIndexLen];
    int *leftOverIndexes = new int[leftOverIndexLen];
    int *sub = new int[rank];
#endif
    //indexes not specified in the tad indexes
    //every coordinate starts as zero
    memset(ret,0,sizeof(int) * rank);
    //find the length of the elements we
    //are iterating over
    int len = 1;
    //left over index cursor for initializing elements
    int leftOverIndex = 0;
    for(int i = 0; i < rank; i++) {
        //look for dimensions NOT found in dimension length (basically compute shape - dimension (set difference)
        bool found = false;
        for(int j = 0; j < originalDimensionLength; j++) {
            //skip over specified dimensions when computing left over length
            if(i == originalDimension[j]) {
                found = true;
                break;
            }
        }
        //add to the
// indexes that aren't specified as part of the tad dimension
//indexes
        if(!found) {
            //accumulate the list of indexes left over used for initializing the return value
            leftOverIndexes[leftOverIndex] = i;
            //accumulate the tad shape
            tadShape[leftOverIndex] = shape[i];
            //accumulate the length (product) of the indexes that will be iterated over
            len *= shape[i];
            leftOverIndex++;
        }
    }

    //sub for indices
    /* int *sub = new int[leftOverIndexLen];
       shape::ind2subOrder(tadShape,index,len,sub); */
    shape::ind2subC(leftOverIndexLen,tadShape,index,len, sub);
    for(int i = 0; i < leftOverIndexLen; i++) {
        ret[leftOverIndexes[i]] = sub[i];
    }

    if (ptrManager == nullptr) {
        delete[] tadShape;
        delete[] leftOverIndexes;
        delete[] sub;
    }

    return ret;
}

// Destructor: only frees what this TAD actually owns. Several members may
// still alias caller-provided buffers, hence the pointer-identity checks.
INLINEDEF TAD::~TAD() {
    //we may have just moved the pointer forward, we may not need to delete the pointer here
    if(originalDimension != this->dimension && createdNewDimension) {
        delete[] this->dimension;
    }
    if(this->originalShapeInfo != this->shapeInfo) {
        delete[] this->shapeInfo;
    }
    if(this->tadOffsets != nullptr) {
        delete[] this->tadOffsets;
    }
    if(this->tadOnlyShapeInfo != nullptr && this->tadOnlyShapeInfo != shapeInfo) {
        delete[] this->tadOnlyShapeInfo;
    }
}

// Build the permutation that moves all non-TAD dimensions first, followed by
// the TAD dimensions in reverse order. Caller owns the result (delete[]).
INLINEDEF int* TAD::permuteDims() {
    //permute dimensions for tad
    int dimIdx = 0;
    //loop backwards assuming dimension is sorted
    int *permuteDims = new int[shape::rank(shapeInfo)];
    for(int i = 0; i < shape::rank(shapeInfo); i++) {
        bool found = false;
        for(int j = 0; j < originalDimensionLength; j++) {
            if(i == originalDimension[j]) {
                found = true;
                break;
            }
        }
        //not found, append it to the end for permute
        if(!found)
            permuteDims[dimIdx++] = i;
    }
    for(int i = originalDimensionLength - 1; i >= 0; i--) {
        permuteDims[dimIdx++] = originalDimension[i];
    }
    /* for (int i = 0; i < originalDimensionLength; i++) {
           permuteDims[i] = originalDimension[i];
       } */
    //permute dimensions for tad
    return permuteDims;
}

// Linear offset (into the original buffer) of TAD number `index`.
// Lazily builds tadOnlyShapeInfo on first use. Returns -1 when the computed
// offset is negative (multi-dimension path only).
INLINEDEF Nd4jIndex TAD::tadOffset(int index) {
    if(tadOnlyShapeInfo == nullptr) {
        this->createTadOnlyShapeInfo();
    }

    if(wholeThing)
        return index;

    if(dimensionLength > 1) {
        int *tad2Sub = this->tad2Sub(index,ptrManager);
        Nd4jIndex ret = shape::getOffset(0,shape::shapeOf(shapeInfo),shape::stride(shapeInfo),tad2Sub,shape::rank(shapeInfo));
        if(ret < 0) {
            if (ptrManager == nullptr)
                delete[] tad2Sub;
            return -1;
        }
        if (ptrManager == nullptr)
            delete[] tad2Sub;
        return ret;
    }
    else {
        int *tad2Sub = this->tad2Sub(index,ptrManager);
        Nd4jIndex ret = shape::getOffset(0,shape::shapeOf(shapeInfo),shape::stride(shapeInfo),tad2Sub,shape::rank(shapeInfo));
        if (ptrManager == nullptr)
            delete[] tad2Sub;
        return ret;
    }
}

// Shape of a single tensor along the configured dimensions; cached in
// tadShape (and tadRank) after the first call.
INLINEDEF int* TAD::tensorShape() {
    if(this->tadShape != nullptr)
        return this->tadShape;
    int *theShape = shape::shapeOf(shapeInfo);
    int *tensorShape = shape::keep(theShape,dimension,dimensionLength,shape::rank(shapeInfo));
    this->tadShape = tensorShape;
    this->tadRank = dimensionLength;
    return tensorShape;
}

// Same mapping as tad2Sub(int) but temporary buffers may come from a CUDA
// UnifiedSharedMemory manager instead of the heap (then nothing is freed).
INLINEDEF int * TAD::tad2Sub(int index, void *ptrManager) {
    int *shape = shape::shapeOf(shapeInfo);
    int rank = shape::rank(shapeInfo);
    int leftOverIndexLen = rank - originalDimensionLength;
    int *tadShape;
    int *leftOverIndexes;
    int *sub;
    int *ret;

#ifdef __CUDACC__
    if (ptrManager != nullptr) {
        UnifiedSharedMemory *manager = (UnifiedSharedMemory *) ptrManager;
        ret = manager->getTempRankBuffer1();
        tadShape = manager->getTempRankBuffer2();
        leftOverIndexes = manager->getTempRankBuffer3();
        sub = manager->getTempRankBuffer4();
    } else {
        ret = new int[rank];
        //shape of the tad
        leftOverIndexes = new int[leftOverIndexLen];
        sub = new int[rank];
        tadShape = new int[leftOverIndexLen];
    }
#else
    ret = new int[rank];
    //shape of the tad
    leftOverIndexes = new int[leftOverIndexLen];
    sub = new int[rank];
    tadShape = new int[leftOverIndexLen];
#endif

    //indexes not specified in the tad indexes
    //every coordinate starts as zero
    memset(ret,0,sizeof(int) * rank);

    //find the length of the elements we
    //are iterating over
    int len = 1;
    //left over index cursor for initializing elements
    int leftOverIndex = 0;
    for(int i = 0; i < rank; i++) {
        //look for dimensions NOT found in dimension length (basically compute shape - dimension (set difference)
        bool found = false;
        for(int j = 0; j < originalDimensionLength; j++) {
            //skip over specified dimensions when computing left over length
            if(i == originalDimension[j]) {
                found = true;
                break;
            }
        }

        //add to the indexes that aren't specified as part of the tad dimension
        //indexes
        if(!found) {
            //accumulate the list of indexes left over used for initializing the return value
            leftOverIndexes[leftOverIndex] = i;
            //accumulate the tad shape
            tadShape[leftOverIndex] = shape[i];
            //accumulate the length (product) of the indexes that will be iterated over
            leftOverIndex++;
            len *= shape[i];
        }
    }

    //sub for indices
    /* int *sub = new int[leftOverIndexLen];
       shape::ind2subOrder(tadShape,index,len,sub); */
    shape::ind2subC(leftOverIndexLen,tadShape,index,len, sub);

    for(int i = 0; i < leftOverIndexLen; i++) {
        ret[leftOverIndexes[i]] = sub[i];
    }

    if (ptrManager == nullptr) {
        delete[] leftOverIndexes;
        delete[] tadShape;
        delete[] sub;
    }

    return ret;
}

// Precompute the offset of every TAD into tadOffsets.
// NOTE(review): tadOffset() lazily builds tadOnlyShapeInfo; if it has not
// been built before this parallel loop, multiple threads race to create it --
// confirm callers invoke createTadOnlyShapeInfo() first.
INLINEDEF void TAD::createOffsets() {
    this->tadOffsets = new Nd4jIndex[this->numTads];
#pragma omp parallel for schedule(guided) proc_bind(close) default(shared)
    for(int i = 0; i < this->numTads; i++) {
        this->tadOffsets[i] = this->tadOffset(i);
    }
}

// Build a shape-info buffer (shape + stride only) describing a single TAD.
// Handles many special cases (scalars, vectors, whole-array TADs) before the
// general slice-and-permute path. Caller owns the returned buffer.
INLINEDEF int* TAD::shapeInfoOnlyShapeAndStride() {
    if(wholeThing && (dimensionLength == 1 && dimension[0] == MAX_DIMENSION) )
        return shape::createScalarShapeInfo();

    //ensure tad shapes get setup right for vectors
    if(dimensionLength > 1 && shape::isVector(shapeInfo))
        return shape::copyOf(shape::shapeInfoLength(shape::rank(shapeInfo)),shapeInfo);

    // case when tad coincides with whole array
    if( this->numTads == 1 && ((shape::rank(originalShapeInfo) == originalDimensionLength) || originalDimensionLength == 0)) {
        // we might have special case here: skipped dimensions might be just full of ones
        int *ret =
shape::copyOf(shape::shapeInfoLength(shape::rank(shapeInfo)), shapeInfo);
        if (shape::isDimPermuted(dimension, dimensionLength))
            // check whether we need permutation
            shape::doPermuteShapeBuffer(ret, dimension);
        return ret;
    }

    int *theShape = shape::shapeOf(shapeInfo);
    int rank = shape::rank(shapeInfo);

    if(dimensionLength == 1) {
        // single-dimension TADs over vectors / size-1 dims collapse to simple cases
        if(dimension[0] == 0 && shape::isVector(shapeInfo) && theShape[1] == 1) {
            int permuted[2] = {1,0};
            int *permutedRet2 = shape::permuteShapeBuffer(shapeInfo,permuted);
            return permutedRet2;
        }
        else if(dimension[0] == 1 && shape::isVector(shapeInfo) && theShape[0] == 1) {
            return shape::copyOf(shape::shapeInfoLength(shape::rank(shapeInfo)),shapeInfo);
        }
        else if(shape::shapeOf(shapeInfo)[dimension[0]] == 1) {
            // size-1 TAD dimension: the TAD is a scalar; stash tadIndex in the
            // shape-info slot three from the end
            int *scalarInfo = shape::createScalarShapeInfo();
            scalarInfo[shape::shapeInfoLength(shape::rank(scalarInfo)) - 3] = this->tadIndex;
            return scalarInfo;
        }
    }

    // general path: permute the TAD dimensions to the back (reversed), slice
    // out this TAD, then possibly slice further / un-permute below
    int *tensorShape = this->tensorShape();
    int *reverseDimensions = shape::reverseCopy(dimension,dimensionLength);
    int *rankRange = shape::range(0,rank);
    int *remove = shape::removeIndex(rankRange,dimension,rank,dimensionLength);
    //concat is wrong here with the length
    int *newPermuteDims = shape::concat(remove,rank - dimensionLength,reverseDimensions,dimensionLength);
    int *permuted = shape::permuteShapeBuffer(shapeInfo,newPermuteDims);

    int sliceIndex = shape::sliceOffsetForTensor(shape::rank(permuted), this->tadIndex, shape::shapeOf(shapeInfo), tensorShape, dimensionLength, dimension, dimensionLength);

    int *ret2 = shape::sliceOfShapeBuffer(sliceIndex,permuted);
    int tensorLength = shape::prod(tensorShape,tadRank);

    int compLength = shape::isVector(ret2) ? shape::length(ret2) : shape::prod(tensorShape,tadRank);
    // const bool isLikeVector = shape::isLikeVector(ret2);
    // if(dimensionLength == tadRank && compLength == shape::length(ret2) && !isLikeVector) {
    if(dimensionLength == tadRank && compLength == shape::length(ret2)) {
        if(dimensionLength == 1 && shape::isVector(ret2) && shape::shapeOf(ret2)[0] == 1) {
            //go to the bottom and return ret2 after proper freeing of pointers
            //basic idea; we *don't* permute row vectors
        }
        else if(dimensionLength > 1) {
            //permute *then* return ret2
            int *finalPermuteDims = new int[shape::rank(ret2)];
            int forward = 0;
            for(int i = shape::rank(ret2) - 1; i >= 0; i--) {
                finalPermuteDims[forward++] = i;
            }
            shape::permuteShapeBufferInPlace(ret2,finalPermuteDims,ret2);
            delete[] finalPermuteDims;
        }
    }
    else {
        // the slice is still larger than one TAD: keep slicing until the
        // lengths line up, then reverse-permute unless it is a row vector
        int length = tensorLength;
        int lengthPerSlice = this->lengthPerSlice(ret2);
        int offset = tadIndex * tensorLength /lengthPerSlice;
        if(sliceIndex == 0 && length == lengthPerSlice) {
            int *newRet2 = shape::sliceOfShapeBuffer(offset,ret2);
            delete[] ret2;
            ret2 = newRet2;

            int *finalPermuteDims = new int[shape::rank(ret2)];
            int forward = 0;
            for(int i = shape::rank(ret2) - 1; i >= 0; i--) {
                finalPermuteDims[forward++] = i;
            }
            // bool isRowVector2 = shape::isRowVector(ret2) && !isLikeVector;
            bool isRowVector2 = shape::isRowVector(ret2);
            if(isRowVector2 == false) {
                shape::permuteShapeBufferInPlace(ret2, finalPermuteDims, ret2);
            }
            delete[] finalPermuteDims;
        }
        else if(length == lengthPerSlice) {
            // wrap the offset back into range before slicing
            offset -= shape::slices(ret2) * (offset / shape::slices(ret2));
            int *newRet2 = shape::sliceOfShapeBuffer(offset,ret2);
            delete[] ret2;
            ret2 = newRet2;
            if(dimensionLength == 1 && shape::isVector(ret2) && shape::shapeOf(ret2)[0] == 1) {
                //go to the bottom and return ret2 after proper freeing of pointers
                //basic idea; we *don't* permute row vectors
            }
            else {
                int *finalPermuteDims = new int[shape::rank(ret2)];
                int forward = 0;
                for(int i = shape::rank(ret2) - 1; i >= 0; i--) {
                    finalPermuteDims[forward++] = i;
                }
                int *newRet = shape::permuteShapeBuffer(ret2,finalPermuteDims);
                delete[] ret2;
                delete[] finalPermuteDims;
                ret2 = newRet;
            }
        }
        else {
            //execute final part, note that this is mainly so delete[] gets called
            //at the bottom of the method
            while(shape::length(ret2) > length) {
                int lengthPerSlice2 = this->lengthPerSlice(ret2);
                sliceIndex = sliceOffsetForTensor(sliceIndex,shape::length(ret2),lengthPerSlice2);
                sliceIndex -= shape::slices(ret2) * (sliceIndex / shape::slices(ret2));
                int *newRet2 = shape::sliceOfShapeBuffer(sliceIndex,ret2);
                delete[] ret2;
                ret2 = newRet2;
            }

            //don't permute on a row vector
            if(dimensionLength == 1 && shape::isVector(ret2) && shape::shapeOf(ret2)[0] == 1) {
                //go to the bottom and return ret2 after proper freeing of pointers
                //basic idea; we *don't* permute row vectors
            }
            else if(dimensionLength > 1){
                //permute *then* return ret
                int *finalPermuteDims = new int[shape::rank(ret2)];
                int forward = 0;
                for(int i = shape::rank(ret2) - 1; i >= 0; i--) {
                    finalPermuteDims[forward++] = i;
                }
                int *newPermute = shape::permuteShapeBuffer(ret2,finalPermuteDims);
                delete[] ret2;
                delete[] finalPermuteDims;
                ret2 = newPermute;
            }
        }
    }

    // free all the intermediate buffers created above; ret2 is the result
    delete[] permuted;
    delete[] newPermuteDims;
    delete[] rankRange;
    delete[] remove;
    delete[] reverseDimensions;
    return ret2;
}

// Number of elements in one TAD along the given dimensions.
INLINEDEF int TAD::tadLength(int *shapeInfo, int *dimension, int dimensionLength) {
    if(dimensionLength == 1) {
        return shape::shapeOf(shapeInfo)[dimension[0]];
    }
    else {
        int ret = 1;
        for(int i = 0; i < shape::rank(shapeInfo); i++) {
            for(int j = 0; j < dimensionLength; j++) {
                if(i == dimension[j])
                    ret *= shape::shapeOf(shapeInfo)[dimension[j]];
            }
        }
        return ret;
    }
}

// Number of TADs: total length divided by the elements per TAD.
INLINEDEF int TAD::tensorsAlongDimension(int *shapeInfo, int *dimension, int dimensionLength) {
    return shape::length(shapeInfo) / this->tadLength(shapeInfo,dimension,dimensionLength);
}

// Normalize the dimension list: resolve negative dims, then repeatedly drop
// size-1 ("singular") dimensions so downstream code sees a canonical reduce.
INLINEDEF void TAD::collapse() {
    int *shape = shape::shapeOf(shapeInfo);
    //handle negative dimensions/backwards indexing
    for(int i = 0; i < dimensionLength; i++) {
        if((dimension)[i] < 0)
(dimension)[i] += shape::rank(this->shapeInfo);
    }

    // work on a private copy of the original dimension list
    this->dimension = new int[dimensionLength];
    memcpy(this->dimension,this->originalDimension,sizeof(int) * dimensionLength);

    //we can drop trailing dimensions where it's all singular for example:
    // shape: 4,3,1,2
    //dimension: 0,2
    // the problem for 0,2 is equivalent to: 0
    //the rest of the algorithm handles cases suchas
    //shape: 4,1,1,2
    //dimension: 0,1
    //when this happens there are other dimensions (eg: at the end) that matter
    int trailingOneDimensions = 0;
    //trailing ones
    for(int i = dimensionLength - 1; i >= 0; i--) {
        if(shape[dimension[i]] != 1) {
            break;
        }
        else if(shape[dimension[i]] == 1)
            trailingOneDimensions++;
    }

    dimensionLength -= trailingOneDimensions;

    int leadingOneDimensions = 0;
    //trailing ones
    for(int i = 0; i < dimensionLength; i++) {
        if(shape[dimension[i]] != 1) {
            break;
        }
        else if(shape[dimension[i]] == 1)
            leadingOneDimensions++;
    }

    //bump the dimension pointer forward for however many leadingones there are
    dimension += leadingOneDimensions;
    //decrease the dimension length by the amount of leading ones
    dimensionLength -= leadingOneDimensions;

    // if no size-1 dimensions remain, we are already canonical
    bool preConverged = true;
    for(int i = 0; i < dimensionLength; i++) {
        if(shape[dimension[i]] == 1) {
            preConverged = false;
            break;
        }
    }

    //we took away all the singular dimensions, we can just return
    if(preConverged)
        return;

    //no more singular dimensions specified
    bool done = false;
    int onesDecrement = 0;
    bool changed = false;
    while(!done) {
        //terminate early: only singular dimensions specified for reduce
        if((dimensionLength) < 1) {
            done = true;
            //signal as a no op
            dimension[0] = -1;
            break;
        }

        //captures intermediary result from the for loop
        traceNew(3);
        int intermediaryResult[MAX_RANK];
        for(int i = 0; i < dimensionLength; i++) {
            intermediaryResult[i] = (dimension)[i];
        }

        bool oneEncountered = false;
        bool nonOneEncountered = false;
        bool hitBeginning = false;
        //assume intermediate collapsing of dimensions
        bool collapseMiddleDimensions = true;
        //note here that dimension length MAY end up being zero
        for(int i = (dimensionLength) - 1; i >= 0; i--) {
            if(shape[(dimension)[i]] == 1) {
                oneEncountered = true;
                //trailing ones
                if(!nonOneEncountered) {
                    //just drop trailing ones
                    dimensionLength--;
                    nonOneEncountered = false;
                    collapseMiddleDimensions = false;
                    //intermediary result just needs to have the results copied from dimension since we're just removing the tail
                    memcpy(intermediaryResult,dimension,sizeof(int) * dimensionLength);
                    changed = true;
                    //break the for loop and force it to go back around starting from the new index
                    break;
                }
                else {
                    //already decremented all dimensions
                    //this was a result of hitting beginning ones
                    //we will only need to loop once
                    if(i == 0) {
                        hitBeginning = true;
                    }
                    //will need to shift dimensions that aren't trailing ones
                    //back by onesDecrement
                    //mark the intermediary result as -1 for non inclusion
                    intermediaryResult[i] = -1;
                    onesDecrement++;
                }
            }
            else {
                intermediaryResult[i] = (dimension)[i];
                nonOneEncountered = true;
            }
        }

        if(collapseMiddleDimensions && oneEncountered) {
            //collapse dimensions
            int newIntermediary[MAX_RANK];
            int idx = 0;
            for(int i = 0; i < dimensionLength; i++) {
                //of note: dimension will decrease by the number of ones encountered
                if(intermediaryResult[i] >= 0) {
                    //dimension 0 doesn't need to be decremented
                    if(intermediaryResult[i] > 0)
                        newIntermediary[idx++] = intermediaryResult[i] - onesDecrement;
                    else
                        newIntermediary[idx++] = intermediaryResult[i];
                }
            }

            //decrement by the number of dimensions where ones appeared
            (dimensionLength) -= onesDecrement;
            //update to current result
            memcpy(dimension,newIntermediary,sizeof(int) * (dimensionLength));
            changed = true;
        }
        //converged: no need to change result
        else {
            //update to current result
            memcpy(dimension,intermediaryResult,sizeof(int) * dimensionLength);
        }

        //converge when there are no singular dimensions specified in the reduce
        done = (!oneEncountered && nonOneEncountered) || hitBeginning;
        //delete[] intermediaryResult;
    }

    //nothing changed but need to collapse dimension
    if(!changed && this->numOnes > 0) {
        for(int i = 0; i < dimensionLength ;i++) {
            dimension[i] -= numOnes;
        }
    }
}

} // closes the enclosing namespace (opening brace is outside this chunk)

#endif //LIBND4J_TAD_H
// Math.h
// // Created by Jarlene on 2017/7/21. // #ifndef MATRIX_MATH_H #define MATRIX_MATH_H #include <math.h> #include <assert.h> #include <functional> #include <vector> #include <random> #include <sys/time.h> #include "Logger.h" #include "Eigen.h" #ifdef USE_MP #include <omp.h> #endif #ifdef USE_MKL #ifndef BLAS #define BLAS #endif #include <mkl.h> #include <mkl_cblas.h> #include <mkl_vsl.h> #include <mkl_vsl_functions.h> #elif defined(USE_BLAS) #ifndef BLAS #define BLAS #endif #include <cblas.h> #endif namespace matrix { static inline bool isLess(int a, int b) { return static_cast<unsigned>(a) < static_cast<unsigned>(b); } static struct timeval tv; static std::mt19937 rnd_engine_; enum BlasTranspose { NoTrans, Trans, ConjTrans }; /// C := alpha*op(A)*op(B) + beta*C /// \tparam T the type of input data /// \param TransA /// \param TransB /// \param M /// \param N /// \param K /// \param alpha /// \param A /// \param B /// \param beta /// \param C template <class T> inline void CPUGemm(const BlasTranspose TransA, const BlasTranspose TransB, const int M, const int N, const int K, const T alpha, const T *A, const T *B, const T beta, T *C); template <> inline void CPUGemm<float>(const BlasTranspose TransA, const BlasTranspose TransB, const int M, const int N, const int K, const float alpha, const float *A, const float *B, const float beta, float *C) { #ifdef BLAS int lda = (TransA == NoTrans) ? K : M; int ldb = (TransB == NoTrans) ? N : K; CBLAS_TRANSPOSE Atrans, Btrans; switch (TransA) { case NoTrans: Atrans = CblasNoTrans; break; case Trans: Atrans = CblasTrans; break; case ConjTrans: Atrans = CblasConjTrans; break; } switch (TransB) { case NoTrans: Btrans = CblasNoTrans; break; case Trans: Btrans = CblasTrans; break; case ConjTrans: Btrans = CblasConjTrans; break; } cblas_sgemm(CblasRowMajor, Atrans, Btrans, M, N, K, alpha, A, lda, B, ldb, beta, C, N); #elif defined(USE_EIGEN) int lda = (TransA == NoTrans) ? M : K; // A 的行 int ldb = (TransB == NoTrans) ? 
N : K; // B 的列 int aCol = (TransA == NoTrans) ? K : M; // A的列 auto aMatrix = create<float>(A, lda, aCol); auto bMatrix = create<float>(B, aCol, ldb); auto cMatrix = create<float>(C, lda, ldb); cMatrix = alpha * aMatrix * bMatrix + beta * cMatrix; #endif } template <> inline void CPUGemm<double>(const BlasTranspose TransA, const BlasTranspose TransB, const int M, const int N, const int K, const double alpha, const double *A, const double *B, const double beta, double *C) { #ifdef BLAS int lda = (TransA == NoTrans) ? K : M; int ldb = (TransB == NoTrans) ? N : K; CBLAS_TRANSPOSE Atrans, Btrans; switch (TransA) { case NoTrans: Atrans = CblasNoTrans; break; case Trans: Atrans = CblasTrans; break; case ConjTrans: Atrans = CblasConjTrans; break; } switch (TransB) { case NoTrans: Btrans = CblasNoTrans; break; case Trans: Btrans = CblasTrans; break; case ConjTrans: Btrans = CblasConjTrans; break; } cblas_dgemm(CblasRowMajor, Atrans, Btrans, M, N, K, alpha, A, lda, B, ldb, beta, C, N); #elif defined(USE_EIGEN) int lda = (TransA == NoTrans) ? M : K; // A 的行 int ldb = (TransB == NoTrans) ? K : N; // B 的列 int aCol = (TransA == NoTrans) ? K : M; // A的列 auto aMatrix = create<double>(A, lda, aCol); auto bMatrix = create<double>(B, aCol, ldb); auto cMatrix = create<double>(C, lda, ldb); cMatrix = alpha * aMatrix * bMatrix + beta * cMatrix; #endif } template <> inline void CPUGemm<int>(const BlasTranspose TransA, const BlasTranspose TransB, const int M, const int N, const int K, const int alpha, const int *A, const int *B, const int beta, int *C) { #ifdef USE_EIGEN int lda = (TransA == NoTrans) ? M : K; // A 的行 int ldb = (TransB == NoTrans) ? N : K; // B 的列 int aCol = (TransA == NoTrans) ? 
K : M; // A的列 auto aMatrix = create<int>(A, lda, aCol); auto bMatrix = create<int>(B, aCol, ldb); auto cMatrix = create<int>(C, lda, ldb); cMatrix = alpha * aMatrix * bMatrix + beta * cMatrix; #endif } template <> inline void CPUGemm<long>(const BlasTranspose TransA, const BlasTranspose TransB, const int M, const int N, const int K, const long alpha, const long *A, const long *B, const long beta, long *C) { } /// y := alpha*A*x + beta*y, or y := alpha*A^T*x + beta*y, /// \tparam T /// \param TransA /// \param M /// \param N /// \param alpha /// \param A /// \param x /// \param beta /// \param y template <class T> inline void CPUGemv(const BlasTranspose TransA, const int M, const int N, const T alpha, const T *A, const T *x, const T beta, T *y); template <> inline void CPUGemv<float>(const BlasTranspose TransA, const int M, const int N, const float alpha, const float *A, const float *x, const float beta, float *y) { #ifdef BLAS CBLAS_TRANSPOSE Atrans; switch (TransA) { case NoTrans: Atrans = CblasNoTrans; break; case Trans: Atrans = CblasTrans; break; case ConjTrans: Atrans = CblasConjTrans; break; default: break; } cblas_sgemv(CblasRowMajor, Atrans, M, N, alpha, A, N, x, 1, beta, y, 1); #elif defined(USE_EIGEN) int lda = (TransA == NoTrans)? M : N; int cda = (TransA == NoTrans)? N : M; auto aMatrix = create<>(A, lda, cda); auto xVector = create<>(x, cda); auto yVector = create<>(y, lda); yVector = alpha * aMatrix * xVector + beta * yVector; #endif } template <> inline void CPUGemv<double>(const BlasTranspose TransA, const int M, const int N, const double alpha, const double *A, const double *x, const double beta, double *y) { #ifdef BLAS CBLAS_TRANSPOSE Atrans; switch (TransA) { case NoTrans: Atrans = CblasNoTrans; break; case Trans: Atrans = CblasTrans; break; case ConjTrans: Atrans = CblasConjTrans; break; default: break; } cblas_dgemv(CblasRowMajor, Atrans, M, N, alpha, A, N, x, 1, beta, y, 1); #elif defined(USE_EIGEN) int lda = (TransA == NoTrans)? 
M : N; int cda = (TransA == NoTrans)? N : M; auto aMatrix = create<double>(A, lda, cda); auto xVector = create<double>(x, cda); auto yVector = create<double>(y, lda); yVector = alpha * aMatrix * xVector + beta * yVector; #endif } template <> inline void CPUGemv<int>(const BlasTranspose TransA, const int M, const int N, const int alpha, const int *A, const int *x, const int beta, int *y) { #ifdef USE_EIGEN int lda = (TransA == NoTrans)? M : N; int cda = (TransA == NoTrans)? N : M; auto aMatrix = create<int>(A, lda, cda); auto xVector = create<int>(x, cda); auto yVector = create<int>(y, lda); yVector = alpha * aMatrix * xVector + beta * yVector; #endif } template <> inline void CPUGemv<long>(const BlasTranspose TransA, const int M, const int N, const long alpha, const long *A, const long *x, const long beta, long *y) { } /// Y = alpha * X + Y /// \tparam T /// \param N /// \param alpha /// \param X /// \param incx /// \param Y /// \param incy template <class T> inline void CPUAxpy(const int N, const T alpha, const T *X, int incx, T *Y, int incy); template <> inline void CPUAxpy<float>(const int N, const float alpha, const float *X, int incx, float *Y, int incy) { #ifdef BLAS cblas_saxpy(N, alpha, X, incx, Y, incy); #else int posx = 0; int posy = 0; #ifdef USE_MP omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i <N; ++i) { Y[posy] += alpha * X[posx]; posx += incx; posy += incy; } #endif } template <> inline void CPUAxpy<double>(const int N, const double alpha, const double *X, int incx, double *Y, int incy) { #ifdef BLAS cblas_daxpy(N, alpha, X, incx, Y, incy); #else int posx = 0; int posy = 0; #ifdef USE_MP omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i <N; ++i) { Y[posy] += alpha * X[posx]; posx += incx; posy += incy; } #endif } template <> inline void CPUAxpy<int>(const int N, const int alpha, const int *X, int incx, int *Y, int incy) { int posx = 0; int posy = 0; #ifdef USE_MP 
omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i <N; ++i) { Y[posy] += alpha * X[posx]; posx += incx; posy += incy; } } template <> inline void CPUAxpy<long>(const int N, const long alpha, const long *X, int incx, long *Y, int incy) { int posx = 0; int posy = 0; #ifdef USE_MP omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i <N; ++i) { Y[posy] += alpha * X[posx]; posx += incx; posy += incy; } } /** * Y = alpha * X + beta * Y * @param T * @param N * @param alpha * @param X * @param beta * @param Y */ template <class T> inline void CPUAxpby(const int N, const T alpha, const T *X, int incx, const T beta, T *Y, int incy); template <> inline void CPUAxpby<float>(const int N, const float alpha, const float *X, int incx, const float beta, float *Y, int incy) { #ifdef BLAS cblas_saxpby(N, alpha, X, incx, beta, Y, incy); #endif } template <> inline void CPUAxpby<double>(const int N, const double alpha, const double *X, int incx, const double beta, double *Y, int incy) { #ifdef BLAS cblas_daxpby(N, alpha, X, incx, beta, Y, incy); #endif } template <> inline void CPUAxpby<int>(const int N, const int alpha, const int *X, int incx, const int beta, int *Y, int incy) { } template <> inline void CPUAxpby<long>(const int N, const long alpha, const long *X, int incx, const long beta, long *Y, int incy) { } /// Y=X /// \tparam T /// \param N /// \param x /// \param incx /// \param y /// \param incy template <class T> inline void CPUCopy(const int N, const T* x, int incx, T* y, int incy); template <> inline void CPUCopy<float>(const int N, const float* x, int incx, float* y, int incy) { #ifdef BLAS cblas_scopy(N, x, incx, y, incy); #else int posx = 0; int posy = 0; #ifdef USE_MP omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { y[posy] = x[posx]; posy += incy; posx += incx; } #endif } template <> inline void CPUCopy<double>(const int N, const double* x, int incx, double* y, int 
incy) { #ifdef BLAS cblas_dcopy(N, x, incx, y, incy); #else int posx = 0; int posy = 0; #ifdef USE_MP omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { y[posy] = x[posx]; posy += incy; posx += incx; } #endif } template <> inline void CPUCopy<int>(const int N, const int* x, int incx, int* y, int incy) { int posx = 0; int posy = 0; #ifdef USE_MP omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { y[posy] = x[posx]; posy += incy; posx += incx; } } template <> inline void CPUCopy<long>(const int N, const long* x, int incx, long* y, int incy) { int posx = 0; int posy = 0; #ifdef USE_MP omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { y[posy] = x[posx]; posy += incy; posx += incx; } } /// /// \tparam T /// \param N /// \param x /// \param incx /// \param y /// \param incy template <class T> inline void CPUSwap(const int N, T * x, int incx, T *y, int incy ); template <> inline void CPUSwap<float>(const int N, float * x, int incx, float *y, int incy ) { #ifdef BLAS cblas_sswap(N, x, incx, y, incy); #else int posx = 0, posy = 0; #ifdef USE_MP omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { std::swap(x[posx], y[posy]); posx += incx; posy += incy; } #endif } template <> inline void CPUSwap<double>(const int N, double * x, int incx, double *y, int incy ) { #ifdef BLAS cblas_dswap(N, x, incx, y, incy); #else int posx = 0, posy = 0; #ifdef USE_MP omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { std::swap(x[posx], y[posy]); posx += incx; posy += incy; } #endif } template <class T> inline void CPUSwap(const int N, T * x) { #ifdef USE_MP omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i < N/2; ++i) { std::swap(x[i], x[N-1-i]); } } /// res = x'*y /// \tparam T /// \param N /// \param x /// \param y /// \param res template <class T> 
inline void CPUDot(const int N, const T* x, const T* y, T& res); template <> inline void CPUDot<float>(const int N, const float* x, const float* y, float& res) { #ifdef BLAS res = cblas_sdot(N, x, 1, y, 1); #else #ifdef USE_MP omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { res += x[i] * y[i]; } #endif } template <> inline void CPUDot<double>(const int N, const double* x, const double* y, double& res) { #ifdef BLAS res = cblas_ddot(N, x, 1, y, 1); #else #ifdef USE_MP omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { res += x[i] * y[i]; } #endif } template <> inline void CPUDot<int>(const int N, const int* x, const int* y, int& res) { #ifdef USE_MP omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { res += x[i] * y[i]; } } template <> inline void CPUDot<long>(const int N, const long* x, const long* y, long& res) { #ifdef USE_MP omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { res += x[i] * y[i]; } } template <class T> inline void Value(const int N, T* out, T val) { if (val == T(0)) { memset(out, 0, sizeof(T) * N); return; } #ifdef USE_EIGEN Vec<T> vec = create<T>(out, N); vec.fill(val); #else #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { out[i] = val; } #endif } template <class T> inline void Scale(const int N, T* out, T val); template <> inline void Scale<float>(const int N, float* out, float val) { #ifdef BLAS cblas_sscal(N, val, out, 1); #elif define(USE_EIGEN) auto v = create<float>(out, N); v = v * val; #else #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { out[i] *= val; } #endif } template <> inline void Scale<double>(const int N, double* out, double val) { #ifdef BLAS cblas_dscal(N, val, out, 1); #elif define(USE_EIGEN) auto v = create<double>(out, N); v = v*val; #else #ifdef USE_MP #pragma omp parallel for #endif for (int i = 
0; i < N; ++i) { out[i] *= val; } #endif } template <> inline void Scale<int>(const int N, int* out, int val) { #ifdef USE_EIGEN auto v = create<int>(out, N); v *= val; #else #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { out[i] *= val; } #endif } template <> inline void Scale<long>(const int N, long* out, long val) { #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { out[i] *= val; } } template <class T> inline void Random(const int N, T *out, T mu, T sigma) { gettimeofday(&tv,NULL); std::normal_distribution<T> dist_normal(mu, sigma); rnd_engine_.seed((unsigned int) (tv.tv_sec * 1000 * 1000 + tv.tv_usec)); #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { out[i] = dist_normal(rnd_engine_); } } template <class T> inline void Add(const int N, const int M, const T *a, const T *b, T *y) { #ifdef USE_EIGEN auto av = create<T>(a, N, M); auto bv = create<T>(b, N); auto yv = create<T>(y, N, M); yv = av.colwise() + bv; #else #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { for (int j = 0; j < M; ++j) { y[i * M +j] = a[i * M +j] + b[i]; } } #endif } template <class T> inline void Add(const int N, const T *a, const T *b, T *y) { #ifdef USE_EIGEN auto av = create<T>(a, N); auto bv = create<T>(b, N); auto yv = create<T>(y, N); yv = (av + bv); #else #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { y[i] = a[i] + b[i]; } #endif } template <class T> inline void Sub(const int N, const T *a, const T *b, T *y) { #ifdef USE_EIGEN auto av = create<T>(a, N); auto bv = create<T>(b, N); auto yv = create<T>(y, N); yv = av - bv; #else #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { y[i] = a[i] - b[i]; } #endif } template <class T> inline void Mul(const int N, const T *a, const T *b, T *y) { #ifdef USE_EIGEN auto av = create<T>(a, N); auto bv = create<T>(b, N); auto yv = create<T>(y, N); yv = av.array() * bv.array(); #else #ifdef USE_MP 
#pragma omp parallel for #endif for (int i=0; i < N; ++i) { y[i] = a[i] * b[i]; } #endif } template <class T> inline void Div(const int N, const T *a, const T *b, T *y) { #ifdef USE_EIGEN auto av = create<T>(a, N); auto bv = create<T>(b, N); auto yv = create<T>(y, N); yv = av.array() / bv.array(); #else #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { y[i] = a[i] / b[i]; } #endif } template <class T> inline void Reciprocal(const int N, T *x) { #ifdef USE_EIGEN auto xv = create<T>(x, N); xv /= T(1.0); #else #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { x[i] = T(1.0) / x[i]; } #endif } template <class T> inline void Negative(const int N, T *x) { #ifdef USE_EIGEN auto xv = create<T>(x, N); xv = (T(0) - xv); #else #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { x[i] = -x[i]; } #endif } /// tanh /// \tparam T /// \param N /// \param x /// \param y template <class T> inline void Tanh(const int N, const T *x, T *y) { #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { y[i] = (exp(x[i])-exp(-x[i]))/(exp(x[i]) + exp(-x[i])); } } /// tanh gradient /// \tparam T /// \param N /// \param x /// \param y /// \param z template <class T> inline void TanhGrad(const int N, const T *x, const T *y, T *z) { #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { z[i] = y[i] * (T(1) - x[i]*x[i]); } } /// sigmoid /// \tparam T /// \param N /// \param x /// \param y template <class T> inline void Sigmoid(const int N, const T*x, T *y) { #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { y[i] = T(1)/(T(1) + exp(T(-1) * x[i])); } } /// sigmoid gradient /// \tparam T /// \param N /// \param x /// \param y /// \param z template <class T> inline void SigmoidGrad(const int N, const T *x, const T *y, T *z) { #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { z[i] = y[i]*x[i]*((T)1-x[i]); } } /// relu /// \tparam T /// \param N /// 
\param x /// \param y template <class T> inline void Relu(const int N, const T *x, T *y) { #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { y[i] = (x[i] > T(0) ? x[i] : T(0)); } } /// relu gradient /// \tparam T /// \param N /// \param dx /// \param x /// \param dy template <class T> inline void ReluGrad(const int N, const T *x, const T *dx, T* dy) { #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { dy[i] = (x[i] > (T)0 ? dx[i] : 0); } } /// softmax /// \tparam T /// \param N /// \param x /// \param y template <class T> inline void Softmax(const int N, const T* x, T* y) { const T max = *std::max_element(x, x + N); T sum = (T)0; #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i<N; ++i) { y[i] = std::exp(x[i] - max); sum += y[i]; } #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i<N; ++i) { y[i] /= sum; } } template <class T> inline void SoftmaxGrad(const int N, const int D, const T* x, const T* pre, T* y) { #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { T sum = T(0); for (int j = 0; j < D; ++j) { sum += x[i * D + j] * pre[i * D + j]; } for (int k = 0; k < D; ++k) { y[i * D + k] = x[i * D + k] * (pre[i * D + k] - sum); } } } /// cross-entropy /// \tparam T /// \param N prediction data length /// \param in1 prediction value /// \param M real data length /// \param in2 real value /// \param out template <class T> inline void CrossEntropy(const int N, const T *in1, const int M, const T *in2, T *out) { int class_num = N / M; #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < M; ++i) { int label = (int)in2[i]; int index = i * class_num + label; out[0] += T(-1) * log(in1[index]); } out[0] /= M; } /// cross-entropy gradient /// \tparam T /// \param N prediction data length /// \param in1 prediction value /// \param M real data length /// \param in2 real value /// \param out template <class T> inline void CrossEntropyGrad(const int N, const T *in1, const int 
M, const T *in2, T *out) { int class_num = N / M; #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < M; ++i) { int label = (int)in2[i]; int index = i * class_num + label; out[index] = T(-1.0) / in1[index]; } } /// rms loss /// \tparam T /// \param N prediction data length /// \param in1 prediction value /// \param M label data length /// \param in2 label value /// \param out template <class T> inline void RMSLoss(const int N, const T *in1, const int M, const T *in2, T *out) { if (N == M) { #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { out[0] += T(0.5) * (in1[i] - in2[i]) * (in1[i] - in2[i]); } } else { #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < M; ++i) { for (int j = 0; j < N / M; ++j) { int idx = static_cast<int>(in2[i]); if (j == idx) { out[0] += T(0.5) * (in1[i] - 1) * (in1[i] - 1); } else { out[0] += T(0.5) * in1[i] * in1[i]; } } } } out[0] /= M; } /// rms loss grad /// \tparam T /// \param N prediction data length /// \param in1 prediction value /// \param M label data length /// \param in2 label value /// \param out template <class T> inline void RMSLossGrad(const int N, const T *in1, const int M, const T *in2, T *out) { if (N == M) { #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { out[i] = (in1[i] - in2[i]); } } else { #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < M; ++i) { for (int j = 0; j < N / M; ++j) { int idx = static_cast<int>(in2[i]); if (j == idx) { out[i * M + j] = (in1[i * M + j] - 1); } else { out[i * M + j] = in1[i * M + j]; } } } } } template <class T> inline void SoftmaxCrossEntropy(const int N, const T *data, const int M, const T *label, T *out) { int class_num = N/M; #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < M; ++i) { T sum = T(0.0); for (int j = 0; j < class_num; ++j) { sum += exp(data[i * class_num +j]); } out[0] += log(sum) - data[static_cast<int>(label[i])]; } out[0] /= M; } template <class T> inline 
void SoftmaxCrossEntropyGrad(const int N, const T *data, const int M, const T *label, T *out) {
    // Gradient of softmax + cross-entropy per row:
    // out_row = softmax(data_row); out_row[label] -= 1.
    int class_num = N / M;
#ifdef USE_MP
#pragma omp parallel for
#endif
    for (int i = 0; i < M; ++i) {
        const T *d = data + i * class_num;
        T *o = out + i * class_num;
        Softmax<T>(class_num, d, o);
        o[static_cast<int>(label[i])] -= 1;
    }
}

// Apply func(i) for each i in [0, N).
// NOTE(review): iterations run in parallel when USE_MP is defined, so func
// must be safe to call concurrently with distinct indices.
template <class T>
inline void Reduce(const int N, std::function<void(int)> func) {
#ifdef USE_MP
#pragma omp parallel for
#endif
    for (int i = 0; i < N; ++i) {
        func(i);
    }
}

// Accumulate each group of N/M consecutive inputs into out[i].
// out must be pre-initialized by the caller (this function only adds).
template <class T>
inline void SumCopy(const int N, const T *in, const int M, T *out) {
#ifdef USE_MP
#pragma omp parallel for
#endif
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N / M; ++j) {
            out[i] += in[i * N / M + j];
        }
    }
}

// im2col: unfold a (channels x height x width) image into patch columns so a
// convolution becomes a matrix multiply.  order == 0 selects the NCHW layout
// (three sub-paths: unpadded/undilated fast path, symmetric-padding path,
// fully general path); order == 1 selects NHWC.  Any other order is fatal.
template <class T, int order>
inline void Img2Col(const T *input, const int channels, const int height, const int width,
                    const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w,
                    const int pad_t, const int pad_l, const int pad_b, const int pad_r,
                    const int stride_h, const int stride_w, T *output) {
    if (order == 0) {
        const int output_h = (height + pad_b + pad_t - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
        const int output_w = (width + pad_l + pad_r - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
        // padding = 0; dilation = 1;
        if (dilation_h == 1 && dilation_w == 1 && pad_l == 0 && pad_r == 0 && pad_t == 0 && pad_b == 0) {
            // Fast path: every output row is a contiguous (or strided) copy of
            // an input row, so use memcpy instead of per-element indexing.
            for (auto k = 0; k < channels * kernel_h * kernel_w; k++) {
                const auto nip = k / (kernel_h * kernel_w);
                const auto rest = k % (kernel_h * kernel_w);
                const auto kh = rest / kernel_w;
                const auto kw = rest % kernel_w;
                auto* dst = output + nip * (kernel_h * kernel_w * output_h * output_w)
                    + kh * (kernel_w * output_h * output_w) + kw * (output_h * output_w);
                const auto* src = input + nip * (height * width);
                for (auto y = 0; y < output_h; y++) {
                    const auto iy = y * stride_h + kh;
                    const auto ix = kw;
                    if (stride_w == 1) {
                        memcpy( dst + (y * output_w), src + (iy * width + ix), sizeof(T) * output_w);
                    }
                    else {
                        for (auto x = 0; x < output_w; x++) {
                            memcpy( dst + (y * output_w + x), src + (iy * width + ix + x * stride_w), sizeof(T));
                        }
                    }
                }
            }
            return;
        }
        // equal padding
        if (pad_l == pad_r && pad_t == pad_b) {
            // Sequential-write path: `output` advances monotonically; rows that
            // fall entirely in the padding are written as zeros.
            const int pad_h = pad_t;
            const int pad_w = pad_l;
            const int channel_size = height * width;
            for (int channel = channels; channel--; input += channel_size) {
                for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
                    for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
                        int input_row = -pad_h + kernel_row * dilation_h;
                        for (int output_rows = output_h; output_rows; output_rows--) {
                            if (!isLess(input_row, height)) {
                                for (int output_cols = output_w; output_cols; output_cols--) {
                                    *(output++) = 0;
                                }
                            } else {
                                int input_col = -pad_w + kernel_col * dilation_w;
                                for (int output_col = output_w; output_col; output_col--) {
                                    if (isLess(input_col, width)) {
                                        *(output++) = input[input_row * width + input_col];
                                    } else {
                                        *(output++) = 0;
                                    }
                                    input_col += stride_w;
                                }
                            }
                            input_row += stride_h;
                        }
                    }
                }
            }
            return;
        }
        // base
        // Fully general path: per-element gather with explicit bounds checks.
        const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
        const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
        int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
        int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
        int channels_col = channels * kernel_h * kernel_w;
        for (int c = 0; c < channels_col; ++c) {
            int w_offset = c % kernel_w;
            int h_offset = (c / kernel_w) % kernel_h;
            int c_im = c / kernel_h / kernel_w;
            for (int h = 0; h < height_col; ++h) {
                for (int w = 0; w < width_col; ++w) {
                    int h_pad = h * stride_h - pad_t + h_offset * dilation_h;
                    int w_pad = w * stride_w - pad_l + w_offset * dilation_w;
                    if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width) {
                        output[(c * height_col + h) * width_col + w] = input[(c_im * height + h_pad) * width + w_pad];
                    } else {
                        output[(c * height_col + h) * width_col + w] = 0;
                    }
                }
            }
        }
    } else if (order == 1) {
        // NHWC: whole channel vectors are copied (or zero-filled) per tap.
        const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
        const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
        int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
        int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
        int h_pad = -pad_t;
        for (int h = 0; h < height_col; ++h) {
            int w_pad = -pad_l;
            for (int w = 0; w < width_col; ++w) {
                for (int ih = h_pad; ih < h_pad + dkernel_h; ih += dilation_h) {
                    for (int iw = w_pad; iw < w_pad + dkernel_w; iw += dilation_w) {
                        if (ih >= 0 && ih < height && iw >= 0 && iw < width) {
                            memcpy(output, input + (ih * width + iw) * channels, sizeof(T) * channels);
                        } else {
                            memset(output, 0, sizeof(T) * channels);
                        }
                        output += channels;
                    }
                }
                w_pad += stride_w;
            }
            h_pad += stride_h;
        }
    } else {
        Logger::Global()->Fatal("Img2Col do not support other image order except NCHW or NHWC \n");
    }
};

// col2im: inverse of Img2Col.  Zeros the image, then scatter-ADDS every patch
// column back into it (overlapping windows accumulate).  Same order/path
// structure as Img2Col.
template <class T, int order>
inline void Col2Img(const T *input, const int channels, const int height, const int width,
                    const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w,
                    const int pad_t, const int pad_l, const int pad_b, const int pad_r,
                    const int stride_h, const int stride_w, T *output) {
    memset(output, 0, height * width * channels* sizeof(T));
    if (order == 0) {
        const int output_h = (height + pad_b + pad_t - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
        const int output_w = (width + pad_l + pad_r - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
        if (dilation_h == 1 && dilation_w == 1 && pad_l == 0 && pad_r == 0 && pad_t == 0 && pad_b == 0) {
            // Fast path mirror of Img2Col: row-wise accumulate instead of copy.
            for (auto k = 0; k < channels * kernel_h * kernel_w; k++) {
                const auto nip = k / (kernel_h * kernel_w);
                const auto rest = k % (kernel_h * kernel_w);
                const auto kh = rest / kernel_w;
                const auto kw = rest % kernel_w;
                const auto* dst = input + nip * (kernel_h * kernel_w * output_h * output_w)
                    + kh * (kernel_w * output_h * output_w) + kw * (output_h * output_w);
                auto* src = output + nip * (height * width);
                for (auto y = 0; y < output_h; y++) {
                    const auto iy = y * stride_h + kh;
                    const auto ix = kw;
                    if (stride_w == 1) {
                        auto offsrc = src + (iy * width + ix);
                        const auto offdst = dst + (y * output_w);
                        for (auto i = 0; i < output_w; ++i) {
                            offsrc[i] += offdst[i];
                        }
                    } else {
                        for (auto x = 0; x < output_w; x++) {
                            auto offsrc = src + (iy * width + ix + x * stride_w);
                            const auto offdst = dst + (y * output_w + x);
                            *offsrc += *offdst;
                        }
                    }
                }
            }
            return;
        }
        if (pad_l == pad_r && pad_t == pad_b) {
            // From Intel, https://github.com/BVLC/caffe/pull/3536
            // Sequential-read path: `input` advances monotonically; reads that
            // fall in the padding are skipped (pointer still advances).
            const int pad_h = pad_t;
            const int pad_w = pad_l;
            const int channel_size = height * width;
            for (int channel = channels; channel--; output += channel_size) {
                for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
                    for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
                        int input_row = -pad_h + kernel_row * dilation_h;
                        for (int output_rows = output_h; output_rows; output_rows--) {
                            if (!isLess(input_row, height)) {
                                input += output_w;
                            } else {
                                int input_col = -pad_w + kernel_col * dilation_w;
                                for (int output_col = output_w; output_col; output_col--) {
                                    if (isLess(input_col, width)) {
                                        output[input_row * width + input_col] += *input;
                                    }
                                    input++;
                                    input_col += stride_w;
                                }
                            }
                            input_row += stride_h;
                        }
                    }
                }
            }
            return;
        }
        // Fully general path: per-element scatter-add with bounds checks.
        const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
        const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
        int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
        int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
        int channels_col = channels * kernel_h * kernel_w;
        for (int c = 0; c < channels_col; ++c) {
            int w_offset = c % kernel_w;
            int h_offset = (c / kernel_w) % kernel_h;
            int c_im = c / kernel_h / kernel_w;
            for (int h = 0; h < height_col; ++h) {
                for (int w = 0; w < width_col; ++w) {
                    int h_pad = h * stride_h - pad_t + h_offset * dilation_h;
                    int w_pad = w * stride_w - pad_l + w_offset * dilation_w;
                    if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width) {
                        output[(c_im * height + h_pad) * width + w_pad] += input[(c * height_col + h) * width_col + w];
                    }
                }
            }
        }
    } else if (order == 1) {
        // NHWC: accumulate whole channel vectors per tap via Add().
        const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
        const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
        int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
        int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
        int h_pad = -pad_t;
        for (int h = 0; h < height_col; ++h) {
            int w_pad = -pad_l;
            for (int w = 0; w < width_col; ++w) {
                for (int ih = h_pad; ih < h_pad + dkernel_h; ih += dilation_h) {
                    for (int iw = w_pad; iw < w_pad + dkernel_w; iw += dilation_w) {
                        if (ih >= 0 && ih < height && iw >= 0 && iw < width) {
                            auto* data_im_patch = output + (ih * width + iw) * channels;
                            Add<T>(channels, data_im_patch, input, data_im_patch);
                        }
                        input += channels;
                    }
                }
                w_pad += stride_w;
            }
            h_pad += stride_h;
        }
    } else {
        Logger::Global()->Fatal("Col2Img do not support other image order except NCHW or NHWC \n");
    }
};

// N-dimensional im2col.  imageShape is [channels, dim_1..dim_N]; dataShape is
// the column-buffer shape.  With col2img == true it scatter-adds columns back
// into the image (col2im) instead of gathering.
template <class T>
inline void Img2ColNd(const T *input, const int *imageShape, const int *dataShape, const int * kernel,
                      const int *stride, const int * dilation, const int * padding, const int N,
                      T *output, bool col2img = false) {
    int kernel_size = 1;
    for (int i = 0; i < N; ++i) {
        kernel_size *= kernel[i];
    }
    const int channels_col = dataShape[0];
    std::vector<int> d_offset(N, 0);
    std::vector<int> d_iter(N, 0);
    for (int c_col = 0; c_col < channels_col; ++c_col) {
        // Decompose c_col into one kernel-tap offset per spatial dimension.
        int offset = c_col;
        for (int d_i = N - 1; d_i >= 0; --d_i) {
            if (d_i < N - 1) {
                offset /= kernel[d_i + 1];
            }
            d_offset[d_i] = offset % kernel[d_i];
        }
        // Walk every output position (d_iter is an N-digit odometer).
        for (bool incremented = true; incremented;) {
            int index_col = c_col;
            int index_im = c_col / kernel_size;
            bool is_padding = false;
            for (int d_i = 0; d_i < N; ++d_i) {
                const int d = d_iter[d_i];
                const int d_im = d * stride[d_i] - padding[d_i] + d_offset[d_i] * dilation[d_i];
                is_padding |= d_im < 0 || d_im >= imageShape[d_i + 1];
                index_col *= dataShape[d_i + 1];
                index_col += d;
                index_im *= imageShape[d_i + 1];
                index_im += d_im;
            }
            if (!col2img) {
                if (is_padding) {
                    output[index_col] = 0;
                } else {
                    output[index_col] = input[index_im];
                }
            } else if (!is_padding) { //
col2im output[index_im] += input[index_col]; } incremented = false; for (int d_i = N - 1; d_i >= 0; --d_i) { const int d_max = dataShape[d_i + 1]; if (d_iter[d_i] < d_max) { Logger::Global()->Fatal("Img2ColNd d_iter[%d] less then d_max\n", d_i); } if (d_iter[d_i] == d_max - 1) { d_iter[d_i] = 0; } else { // d_iter[d_i] < d_max - 1 ++d_iter[d_i]; incremented = true; break; } } } } }; template <class T> inline void Col2ImgNd(const T *input, const int *imageShape, const int *dataShape, const int * kernel, const int *stride, const int * dilation, const int * padding, const int N, T *output) { int imageSize = 1; for (int i = 0; i < N; ++i) { imageSize *= imageShape[i]; } memset(output, 0, sizeof(T) * imageSize); Img2ColNd(input, imageShape, dataShape, kernel, stride, dilation, padding, N, output, true); } template<class T> inline void img2col(const T *input, const int input_channels, const int input_width, const int input_height, const int stride_width, const int stride_height, const int padding_width, const int padding_height, const int filter_width, const int filter_height, const int dilation_width, const int dilation_height, T *output) { const int output_width = (input_width + 2 * padding_width - (dilation_width * (filter_width - 1) + 1)) / stride_width + 1; const int output_height = (input_height + 2 * padding_height - (dilation_height * (filter_height - 1) + 1)) / stride_height + 1; const int col_channels = input_channels * filter_width * filter_height; #ifdef USE_MP #pragma omp parallel for #endif for (int c = 0; c < col_channels; ++c) { int w_offset = c % filter_width; int h_offset = (c / filter_width) % filter_height; int c_im = c / filter_width / filter_height; for (int h = 0; h < output_height; ++h) { for (int w = 0; w < output_width; ++w) { int imRowIdx = h * stride_height + h_offset * dilation_height; int imColIdx = w * stride_width + w_offset * dilation_width; if ((imRowIdx - padding_height) < 0 || (imRowIdx - padding_height) >= input_height || (imColIdx - 
padding_width) < 0 || (imColIdx - padding_width) >= input_width) { output[(c * output_height + h) * output_width + w] = T(0); } else { imRowIdx += c_im * input_height - padding_height; imColIdx -= padding_width; output[(c * output_height + h) * output_width + w] = input[imRowIdx * input_width + imColIdx]; } } } } } template<class T> inline void col2img(T *input, const int input_channels, const int input_width, const int input_height, const int stride_width, const int stride_height, const int padding_width, const int padding_height, const int filter_width, const int filter_height, const int dilation_width, const int dilation_height, const T *output) { const int output_width = (input_width + 2 * padding_width - (dilation_width * (filter_width - 1) + 1)) / stride_width + 1; const int output_height = (input_height + 2 * padding_height - (dilation_height * (filter_height - 1) + 1)) / stride_height + 1; const int col_channels = input_channels * filter_width * filter_height; #ifdef USE_MP #pragma omp parallel for #endif for (int c = 0; c < col_channels; ++c) { int w_offset = c % filter_width; int h_offset = (c / filter_width) % filter_height; int c_im = c / filter_width / filter_height; for (int h = 0; h < output_height; ++h) { for (int w = 0; w < output_width; ++w) { int imRowIdx = h * stride_height + h_offset * dilation_height; int imColIdx = w * stride_width + w_offset * dilation_width; imRowIdx -= padding_height; imColIdx -= padding_width; if (imRowIdx >= 0 && imRowIdx < input_height && imColIdx >= 0 && imColIdx < input_width) { int input_idx = (imRowIdx + c_im * input_height) * input_width + imColIdx; int output_idx = (c * output_height + h) * output_width + w; input[input_idx] += output[output_idx]; } } } } } template<class T> inline void NaiveConv(const T *input, const int batch_size, const int input_channels, const int input_width, const int input_height, const int stride_width, const int stride_height, const int padding_width, const int padding_height, const int 
filter_width, const int filter_height, const int dilation_width, const int dilation_height, const int output_channels, const T *filter, T *output) { const int output_width = (input_width + 2 * padding_width - (dilation_width * (filter_width - 1) + 1)) / stride_width + 1; const int output_height = (input_height + 2 * padding_height - (dilation_height * (filter_height - 1) + 1)) / stride_height + 1; #ifdef USE_MP #pragma omp parallel for #endif for (int batch = 0; batch < batch_size; ++batch) { for (int out_channel = 0; out_channel <output_channels ; ++out_channel) { for (int out_h = 0; out_h < output_height; ++out_h) { for (int out_w = 0; out_w < output_width; ++out_w) { const int inStartH = (out_h * stride_height) - padding_height; const int inStartW = (out_w * stride_width) - padding_width; T outValue = (T)0; for (int in_channel = 0; in_channel < input_channels; ++in_channel) { for (int filter_h = 0; filter_h < filter_height; ++filter_h) { for (int filter_w = 0; filter_w < filter_width; ++filter_w) { T inValue; const int inH = inStartH + filter_h; const int inW = inStartW + filter_w; if ((inH >= 0 && inH < input_height) && (inW >= 0 && inW < input_width)) { int offsetInput = batch * input_channels * input_height * input_width + in_channel * input_height * input_width + inH * input_width + inW; inValue = input[offsetInput]; } else { inValue = (T)0; } int offsetFilter = out_channel * input_channels * filter_height * filter_width + in_channel * filter_height * filter_width + filter_h * filter_width + filter_w; T filterValue = filter[offsetFilter]; outValue += (inValue * filterValue); } } } int offset = batch * output_channels * output_height * output_width + out_channel * output_height * output_width + out_h * output_width + out_w; output[offset] = outValue; } } } } } template <class T> inline void pooling2D(const T *input, const int batch_size, const int channel, const int input_width, const int input_height, const int output_width, const int output_height, const 
int stride_width, const int stride_height, const int padding_width, const int padding_height, const int filter_width, const int filter_height, const int dilation_width, const int dilation_height, T *output, int type = 0, T *mask = nullptr) { const int input_stride = input_height * input_width; const int output_stride = output_height * output_width; #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < batch_size; ++i) { for (int c = 0; c < channel; ++c) { for (int ph = 0; ph < output_height; ++ph) { int hstart = ph * stride_height - padding_height; int hend = std::min(hstart + filter_height, input_height); hstart = std::max(hstart, 0); for (int pw = 0; pw < output_width; ++pw) { int wstart = pw * stride_width - padding_width; int wend = std::min(wstart + filter_width, input_width); wstart = std::max(wstart, 0); T ele; if (type == 0) { ele = input[hstart * input_width + wstart]; int index = hstart * input_width + wstart; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (ele < input[h * input_width + w]) { ele = input[h * input_width + w]; index = h * input_width + w; } } } output[ph * output_width + pw] = ele; if (mask != nullptr) { mask[ph * output_width + pw] = T(index); } } else if (type == 1) { ele = T(0); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { ele += input[h * input_width + w]; } } output[ph * output_width + pw] = ele / (hend * wend); } else { Logger::Global()->Fatal("not Implementation Pooling2D with other PoolType"); } } } input += input_stride; output += output_stride; if (mask != nullptr) { mask += output_stride; } } } } template <class T> inline void NHWC2NCHW(const T * input, const int num, const int inH, const int inW, const int inC, T *output) { #ifdef USE_MP #pragma omp parallel for #endif for (int n = 0; n < num; ++n) { for (int h = 0; h < inH; ++h) { for (int w = 0; w < inW; ++w) { for (int c = 0; c < inC; ++c) { output[((n * inC + c) * inH + h) * inW + w] = *(input++); } 
} } } } template <class T> inline void NCHW2NHWC(const T * input, const int num, const int inC, const int inH, const int inW, T *output) { #ifdef USE_MP #pragma omp parallel for #endif for (int n = 0; n < num; ++n) { for (int c = 0; c < inC; ++c) { for (int h = 0; h < inH; ++h) { for (int w = 0; w < inW; ++w) { output[((n * inH + h) * inW + w) * inC + c] = *(input++); } } } } } template <class I, class R> inline std::vector<R> Map(std::function<R(I)> fun, const std::vector<I>& vec) { std::vector<R> res; res.reserve(vec.size()); #ifdef USE_MP #pragma omp parallel for #endif for (auto& i : vec) { res.push_back(fun(i)); } return res; }; template <typename R, typename I> inline std::vector<R> Map(std::function<R(I)> fun, std::vector<I>&& vec) { return Map<R, I>(fun, vec); } template <typename I> inline I Reduce(std::function<I(const I&, const I&)> func, I initVal, const std::vector<I>& vec) { I res = initVal; #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < vec.size(); ++i) { res = func(res, vec.at(i)); } return res; } template <typename I> inline I Reduce(std::function<I(const I&, const I&)> func, const std::vector<I>& vec) { const std::vector<I> v(vec.begin() + 1, vec.end()); return Reduce(func, vec.at(0), v); } template <typename I> inline I Reduce(std::function<I(I&&, I&&)> func, I&& initVal, std::vector<I>&& vec) { #ifdef USE_MP #pragma omp parallel for #endif I res = std::move(initVal); for (int i = 0; i < vec.size(); ++i) { res = func(std::move(res), std::move(vec.at(i))); } return res; } template <typename I> I Reduce(std::function<I(I&&, I&&)> func, std::vector<I>&& vec) { #ifdef USE_MP #pragma omp parallel for #endif I res = std::move(vec.at(0)); for (int i = 1; i < vec.size(); ++i) { res = func(std::move(res), std::move(vec.at(i))); } return res; } template <typename R, typename I> inline R MapReduce(std::function<R(R, I, bool)> func, const std::vector<I>& vec) { #ifdef USE_MP #pragma omp parallel for #endif R res = func(R(), vec.at(0), 
true); for (int i = 1; i < vec.size(); ++i) { res = func(res, vec.at(i), false); } return res; } template< class T> inline std::vector<T> Filter(std::function<bool(const T)> func, const std::vector<T> &input) { std::vector<T> res; res.reserve(input.size()); for (const auto &i : input) { if (func(i)) { res.push_back(i); } } res.shrink_to_fit(); return res; } } #endif //MATRIX_MATH_H
GB_unop__identity_fp64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fp64_fc64)
// op(A') function:  GB (_unop_tran__identity_fp64_fc64)

// C type:   double
// A type:   GxB_FC64_t
// cast:     double cij = (double) creal (aij)
// unaryop:  cij = aij

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: the identity op on fp64 from fc64 keeps only the real part
#define GB_CAST(z, aij) \
    double z = (double) creal (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = (double) creal (aij) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fp64_fc64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: apply the op to all anz entries
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            double z = (double) creal (aij) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            double z = (double) creal (aij) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fp64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // shared transpose kernel body; expands using the GB_* macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
lndsr.c
#include <stdio.h> #include <stdlib.h> #include <sys/stat.h> #include <string.h> #include <unistd.h> #include <math.h> #include "lndsr.h" #include "keyvalue.h" #include "const.h" #include "param.h" #include "input.h" #include "prwv_input.h" #include "lut.h" #include "output.h" #include "sr.h" #include "ar.h" #include "bool.h" #include "error.h" #include "clouds.h" #include "read_grib_tools.h" #include "sixs_runs.h" #define AERO_NB_BANDS 3 #define SP_INDEX 0 #define WV_INDEX 1 #define ATEMP_INDEX 2 #define OZ_INDEX 0 #define DEBUG_FLAG 0 /* #define DEBUG_AR 0 */ /* #define DEBUG_CLD 1 */ /* DEM Definition: U_char format, 1 count = 100 meters */ /* 0 = 0 meters */ #define DEMFILE "CMGDEM.hdf" #define DEM_NBLAT 3600 #define DEM_DLAT 0.05 #define DEM_LATMIN (-90.0) #define DEM_LATMAX 90.0 #define DEM_NBLON 7200 #define DEM_DLON 0.05 #define DEM_LONMIN (-180.0) #define DEM_LONMAX 180.0 #define P_DFTVALUE 1013.0 /* Type definitions */ atmos_t atmos_coef; #ifdef DEBUG_AR FILE *fd_ar_diags = NULL; int diags_il_ar; #endif #ifdef DEBUG_CLD FILE *fd_cld_diags = NULL; #endif /* reading the DEM in hdf */ int32 sds_file_id,sds_id,status; char sds_name[256]; int32 sds_index; int32 dim_sizes[2],start[2],stride[2],edges[2]; int32 data_type,n_attrs,rank; /* Prototypes */ #ifndef HPUX #define chand chand_ #define csalbr csalbr_ #endif void chand(float *phi,float *muv,float *mus,float *tau_ray,float *actual_rho_ray); void csalbr(float *tau_ray,float *actual_S_r); int update_atmos_coefs(atmos_t *atmos_coef,Ar_gridcell_t *ar_gridcell, sixs_tables_t *sixs_tables,int ***line_ar,Lut_t *lut,int nband, int bkgd_aerosol); int update_gridcell_atmos_coefs(int irow,int icol,atmos_t *atmos_coef,Ar_gridcell_t *ar_gridcell, sixs_tables_t *sixs_tables,int **line_ar,Lut_t *lut,int nband, int bkgd_aerosol); float calcuoz(short jday,float flat); float get_dem_spres(short *dem,float lat,float lon); #ifdef SAVE_6S_RESULTS #define SIXS_RESULTS_FILENAME "SIXS_RUN_RESULTS.TXT" int 
read_6S_results_from_file(char *filename,sixs_tables_t *sixs_tables); int write_6S_results_to_file(char *filename,sixs_tables_t *sixs_tables); #endif void sun_angles (short jday,float gmt,float flat,float flon,float *ts,float *fs); /* Functions */ int main (int argc, char *argv[]) { Param_t *param = NULL; Input_t *input = NULL, *input_b6 = NULL; InputPrwv_t *prwv_input = NULL; InputOzon_t *ozon_input = NULL; Lut_t *lut = NULL; Output_t *output = NULL; int i,j,il, is,ib,i_aot,j_aot,ifree; int il_start, il_end, il_ar, il_region, is_ar; int16 *line_out[NBAND_SR_MAX]; int16 *line_out_buf = NULL; int16 ***line_in = NULL; int16 **line_in_band_buf = NULL; int16 *line_in_buf = NULL; int ***line_ar = NULL; int **line_ar_band_buf = NULL; int *line_ar_buf = NULL; int16** b6_line = NULL; int16* b6_line_buf = NULL; float *atemp_line = NULL; uint8** qa_line = NULL; uint8* qa_line_buf = NULL; char **ddv_line = NULL; char *ddv_line_buf = NULL; char **rot_cld[3],**ptr_rot_cld[3],**ptr_tmp_cld; char **rot_cld_block_buf = NULL; char *rot_cld_buf = NULL; char envi_file[STR_SIZE]; /* name of the output ENVI header file */ char *cptr = NULL; /* pointer to the file extension */ bool refl_is_fill; Sr_stats_t sr_stats; Ar_stats_t ar_stats; Ar_gridcell_t ar_gridcell; float *prwv_in[NBAND_PRWV_MAX]; float *prwv_in_buf = NULL; int *ozon_in = NULL; float corrected_sun_az; /* (degrees) sun azimuth angle has been corrected for polar scenes that are ascending or flipped */ int nbpts; int inter_aot; /* atmospheric opacity */ float scene_gmt; Geoloc_t *space = NULL; Space_def_t space_def; char *dem_name = NULL; Img_coord_float_t img; Img_coord_int_t loc; Geo_coord_t geo; t_ncep_ancillary anc_O3,anc_WV,anc_SP,anc_ATEMP; double sum_spres_anc,sum_spres_dem; int nb_spres_anc,nb_spres_dem; float tmpflt_arr[4]; double coef; int tmpint; int osize; int debug_flag; sixs_tables_t sixs_tables; float center_lat,center_lon; char tmpfilename[128]; FILE *fdtmp/*, *fdtmp2 */; int tmpid; /* file ID for temporary 
file (ID not used) */ short *dem_array; int dem_available; cld_diags_t cld_diags; float flat,flon/*,fts,ffs*/; double delta_y,delta_x; float adjust_north; float sum_value,sumsq_value; int no_ozone_file; short jday; Espa_internal_meta_t xml_metadata; /* XML metadata structure */ Espa_global_meta_t *gmeta = NULL; /* pointer to global meta */ Envi_header_t envi_hdr; /* output ENVI header information */ /* Vermote additional variable declaration for the cloud mask May 29 2007 */ float t6s_seuil; debug_flag= DEBUG_FLAG; no_ozone_file=0; /* Read the parameters from the command-line and input parameter file */ param = GetParam(argc, argv); if (param == NULL) EXIT_ERROR("getting runtime parameters", "main"); printf ("\nRunning lndsr ....\n"); /* Validate the input metadata file */ if (validate_xml_file (param->input_xml_file_name) != SUCCESS) { /* Error messages already written */ EXIT_ERROR("Unable to validate XML file", "main"); } /* Initialize the metadata structure */ init_metadata_struct (&xml_metadata); /* Parse the metadata file into our internal metadata structure; also allocates space as needed for various pointers in the global and band metadata */ if (parse_metadata (param->input_xml_file_name, &xml_metadata) != SUCCESS) { /* Error messages already written */ EXIT_ERROR("parsing XML file", "main"); } gmeta = &xml_metadata.global; /* pointer to global meta */ /* Open input files; grab QA band for reflectance band */ input = OpenInput(&xml_metadata, false /* not thermal */); if (input == NULL) EXIT_ERROR("bad input file", "main"); input_b6 = OpenInput(&xml_metadata, true /* thermal */); if (input_b6 == NULL) { param->thermal_band = false; printf ("WARNING: no TOA brightness temp band available. 
" "Processing without."); } else param->thermal_band = true; if (param->num_prwv_files > 0 && param->num_ncep_files > 0) { EXIT_ERROR("both PRWV and PRWV_FIL files specified", "main"); } /* The surface reflectance algorithm cannot be implemented for solar zenith angles greater than 76 degrees. Need to flag if the current scene falls into that category. */ if (input->meta.sun_zen * DEG > 76.0) { EXIT_ERROR ("Solar zenith angle is too large to allow for surface " "reflectance processing. Corrections must be limited to top-of-" "atmosphere reflectance and brightness temperature corrections. " "Use the --process_sr=False command-line argument when running " "do_ledaps.py.", "main"); } /* Open prwv input file */ if (param->num_prwv_files > 0) { prwv_input = OpenInputPrwv(param->prwv_file_name); if (prwv_input==NULL) EXIT_ERROR("bad input prwv file","main"); osize= 3 * (prwv_input->size.ntime*prwv_input->size.nlat* prwv_input->size.nlon); prwv_in_buf = calloc(osize, sizeof(float)); if (prwv_in_buf == NULL) EXIT_ERROR("allocating input prwv buffer", "main"); prwv_in[0] = prwv_in_buf; for (ib = 1; ib < prwv_input->nband; ib++) prwv_in[ib] = prwv_in[ib - 1] + (prwv_input->size.ntime* prwv_input->size.nlat*prwv_input->size.nlon); for (ib = 0; ib < prwv_input->nband; ib++) { if (!GetInputPrwv(prwv_input, ib, prwv_in[ib])) EXIT_ERROR("reading input prwv data", "main"); } /**** ozone ***/ if ( param->num_ozon_files<1 ) no_ozone_file=1; else { ozon_input = OpenInputOzon(param->ozon_file_name); if (ozon_input==NULL) EXIT_ERROR("bad input ozon file", "main"); osize= (ozon_input->size.ntime*ozon_input->size.nlat* ozon_input->size.nlon); ozon_in = calloc(osize, sizeof(int)); if (ozon_in == NULL) EXIT_ERROR("allocating input ozone buffer", "main"); if (!GetInputOzon(ozon_input, 0, ozon_in)) EXIT_ERROR("reading input ozone data", "main"); } } /* Get Lookup table, based on reflectance information */ lut = GetLut(input->nband, &input->meta, &input->size); if (lut == NULL) 
EXIT_ERROR("bad lut file", "main"); /* Get geolocation space definition */ if (!get_geoloc_info(&xml_metadata, &space_def)) EXIT_ERROR("getting space metadata from XML file", "main"); space = setup_mapping(&space_def); if (space == NULL) EXIT_ERROR("getting setting up geolocation mapping", "main"); printf ("Number of input bands: %d\n", input->nband); printf ("Number of input lines: %d\n", input->size.l); printf ("Number of input samples: %d\n", input->size.s); /* If the scene is an ascending polar scene (flipped upside down), then the solar azimuth needs to be adjusted by 180 degrees. The scene in this case would be north down and the solar azimuth is based on north being up. */ corrected_sun_az = input->meta.sun_az * DEG; if (gmeta->ul_corner[0] < gmeta->lr_corner[0]) { corrected_sun_az += 180.0; if (corrected_sun_az > 360.0) corrected_sun_az -= 360.0; printf ("Polar or ascending scene. Readjusting solar azimuth by " "180 degrees.\n New value: %f radians (%f degrees)\n", corrected_sun_az*RAD, corrected_sun_az); } /* Open the output files and set up the necessary information for appending to the XML file */ output = OpenOutput(&xml_metadata, input, param, lut); if (output == NULL) EXIT_ERROR("opening output file", "main"); /* Open diagnostics files if needed */ #ifdef DEBUG_AR strcpy(tmpfilename,param->output_file_name); strcat(tmpfilename,".DEBUG_AR"); fd_ar_diags=fopen(tmpfilename,"w"); if (fd_ar_diags != NULL) { fprintf(fd_ar_diags,"cell_row cell_col total_nb_samples avg_b1 std_b1 avg_b2 std_b2 avg_b3 std_b3 avg_b7 std_b7 szen vzen relaz wv ozone spres fraction_water fraction_clouds fraction_cldshadow fraction_snow spres_ratio tau_ray corrected_T_ray corrected_Sr measured_rho_b1 simulated_b1_01 simulated_b1_02 simulated_b1_03 simulated_b1_04 simulated_b1_05 simulated_b1_06 simulated_b1_07 simulated_b1_08 simulated_b1_09 simulated_b1_10 simulated_b1_11 simulated_b1_12 simulated_b1_13 simulated_b1_14 simulated_b1_15 aot_index coef aot_value new_aot 
ratio_neg_red\n"); } #endif #ifdef DEBUG_CLD strcpy(tmpfilename,"tempfile.DEBUG_CLD"); fd_cld_diags=fopen(tmpfilename,"w"); if (fd_cld_diags != NULL) { fprintf(fd_cld_diags,"cell_row cell_col nb_samples airtemp_2m " "avg_t6_clear std_t6_clear avg_b7_clear std_b7_clear\n"); } #endif /* Allocate memory for input lines */ line_in = calloc(lut->ar_region_size.l, sizeof(int16 **)); if (line_in == NULL) EXIT_ERROR("allocating input line buffer (a)", "main"); line_in_band_buf = calloc(lut->ar_region_size.l * input->nband, sizeof(int16 *)); if (line_in_band_buf == NULL) EXIT_ERROR("allocating input line buffer (b)", "main"); line_in_buf = calloc(input->size.s * lut->ar_region_size.l * input->nband, sizeof(int16)); if (line_in_buf == NULL) EXIT_ERROR("allocating input line buffer (c)", "main"); for (il = 0; il < lut->ar_region_size.l; il++) { line_in[il] = line_in_band_buf; line_in_band_buf += input->nband; for (ib = 0; ib < input->nband; ib++) { line_in[il][ib] = line_in_buf; line_in_buf += input->size.s; } } /* Allocate memory for qa line */ qa_line = calloc(lut->ar_region_size.l,sizeof(uint8 *)); if (qa_line == NULL) EXIT_ERROR("allocating qa line", "main"); qa_line_buf = calloc(input->size.s * lut->ar_region_size.l, sizeof(uint8)); if (qa_line_buf == NULL) EXIT_ERROR("allocating qa line buffer", "main"); for (il = 0; il < lut->ar_region_size.l; il++) { qa_line[il]=qa_line_buf; qa_line_buf += input->size.s; } /* Allocate memory for one band 6 line */ if (param->thermal_band) { b6_line = calloc(lut->ar_region_size.l,sizeof(int16 *)); if (b6_line == NULL) EXIT_ERROR("allocating b6 line", "main"); b6_line_buf = calloc(input_b6->size.s * lut->ar_region_size.l, sizeof(int16)); if (b6_line_buf == NULL) EXIT_ERROR("allocating b6 line buffer", "main"); for (il = 0; il < lut->ar_region_size.l; il++) { b6_line[il]=b6_line_buf; b6_line_buf += input_b6->size.s; } } /* Allocate memory for one air temperature line */ atemp_line = calloc(input->size.s,sizeof(float)); if (atemp_line == 
NULL) EXIT_ERROR("allocating atemp line", "main"); /* Allocate memory for ddv line */ ddv_line = calloc(lut->ar_region_size.l,sizeof(char *)); if (ddv_line == NULL) EXIT_ERROR("allocating ddv line", "main"); ddv_line_buf = calloc(input->size.s * lut->ar_region_size.l, sizeof(char)); if (ddv_line_buf == NULL) EXIT_ERROR("allocating ddv line buffer", "main"); for (il = 0; il < lut->ar_region_size.l; il++) { ddv_line[il]=ddv_line_buf; ddv_line_buf += input->size.s; } /* Allocate memory for rotating cloud buffer */ rot_cld_buf=calloc(input->size.s*lut->ar_region_size.l*3, sizeof(char)); if (rot_cld_buf == NULL) EXIT_ERROR("allocating roatting cloud buffer (a)", "main"); rot_cld_block_buf=calloc(lut->ar_region_size.l*3, sizeof(char *)); if (rot_cld_block_buf == NULL) EXIT_ERROR("allocating rotating cloud buffer (b)", "main"); for (ib = 0; ib < 3; ib++) { rot_cld[ib]=rot_cld_block_buf; rot_cld_block_buf += lut->ar_region_size.l; for (il = 0; il < lut->ar_region_size.l; il++) { rot_cld[ib][il]=rot_cld_buf; rot_cld_buf+=input->size.s; } } /* Allocate memory for ar_gridcell */ ar_gridcell.nbrows=lut->ar_size.l; ar_gridcell.nbcols=lut->ar_size.s; ar_gridcell.lat=calloc(lut->ar_size.s * lut->ar_size.l, sizeof(float)); if (ar_gridcell.lat == NULL) EXIT_ERROR("allocating ar_gridcell.lat", "main"); ar_gridcell.lon=calloc(lut->ar_size.s * lut->ar_size.l, sizeof(float)); if (ar_gridcell.lon == NULL) EXIT_ERROR("allocating ar_gridcell.lon", "main"); ar_gridcell.sun_zen=calloc(lut->ar_size.s * lut->ar_size.l, sizeof(float)); if (ar_gridcell.sun_zen == NULL) EXIT_ERROR("allocating ar_gridcell.sun_zen", "main"); ar_gridcell.view_zen=calloc(lut->ar_size.s * lut->ar_size.l,sizeof(float)); if (ar_gridcell.view_zen == NULL) EXIT_ERROR("allocating ar_gridcell.view_zen", "main"); ar_gridcell.rel_az=calloc(lut->ar_size.s * lut->ar_size.l,sizeof(float)); if (ar_gridcell.rel_az == NULL) EXIT_ERROR("allocating ar_gridcell.rel_az", "main"); ar_gridcell.wv=calloc(lut->ar_size.s * 
lut->ar_size.l,sizeof(float)); if (ar_gridcell.wv == NULL) EXIT_ERROR("allocating ar_gridcell.wv", "main"); ar_gridcell.spres=calloc(lut->ar_size.s * lut->ar_size.l,sizeof(float)); if (ar_gridcell.spres == NULL) EXIT_ERROR("allocating ar_gridcell.spres", "main"); ar_gridcell.ozone=calloc(lut->ar_size.s * lut->ar_size.l,sizeof(float)); if (ar_gridcell.ozone == NULL) EXIT_ERROR("allocating ar_gridcell.ozone", "main"); ar_gridcell.spres_dem=calloc(lut->ar_size.s * lut->ar_size.l,sizeof(float)); if (ar_gridcell.spres_dem == NULL) EXIT_ERROR("allocating ar_gridcell.spres_dem", "main"); /* Allocate memory for output lines */ line_out_buf = calloc(output->size.s * output->nband_out, sizeof(int16)); if (line_out_buf == NULL) EXIT_ERROR("allocating output line buffer", "main"); line_out[0] = line_out_buf; for (ib = 1; ib < output->nband_out; ib++) line_out[ib] = line_out[ib - 1] + output->size.s; /* Allocate memory for the aerosol lines */ line_ar = calloc(lut->ar_size.l, sizeof(int **)); if (line_ar == NULL) EXIT_ERROR("allocating aerosol line buffer (a)", "main"); line_ar_band_buf = calloc(lut->ar_size.l * AERO_NB_BANDS, sizeof(int *)); if (line_ar_band_buf == NULL) EXIT_ERROR("allocating aerosol line buffer (b)", "main"); line_ar_buf = calloc(lut->ar_size.l * lut->ar_size.s * AERO_NB_BANDS, sizeof(int)); if (line_ar_buf == NULL) EXIT_ERROR("allocating aerosol line buffer (c)", "main"); for (il = 0; il < lut->ar_size.l; il++) { line_ar[il] = line_ar_band_buf; line_ar_band_buf += AERO_NB_BANDS; for (ib = 0; ib < AERO_NB_BANDS; ib++) { line_ar[il][ib] = line_ar_buf; line_ar_buf += lut->ar_size.s; } } /* Initialize the statistics */ ar_stats.nfill = 0; ar_stats.first = true; for (ib = 0; ib < output->nband_out; ib++) { sr_stats.nfill[ib] = 0; sr_stats.nsatu[ib] = 0; sr_stats.nout_range[ib] = 0; sr_stats.first[ib] = true; } /**** Get center lat lon and deviation from true north ****/ img.l=input->size.l/2.; img.s=input->size.s/2.; img.is_fill=false; if (!from_space(space, 
&img, &geo)) EXIT_ERROR("mapping from space (0)", "main"); center_lat=geo.lat * DEG; center_lon=geo.lon * DEG; /* Compute scene gmt time */ if ( (input->meta.acq_date.hour !=0 ) || (input->meta.acq_date.minute != 0 ) || (input->meta.acq_date.second !=0)) scene_gmt=input->meta.acq_date.hour + input->meta.acq_date.minute/60. + input->meta.acq_date.second/3600.; else scene_gmt=10.5-center_lon/15.; if ( scene_gmt < 0.) scene_gmt=scene_gmt+24.; printf ("Acquisition Time: %02d:%02d:%fZ\n", input->meta.acq_date.hour, input->meta.acq_date.minute, input->meta.acq_date.second); /* Read PRWV Data */ if ( param->num_prwv_files > 0 ) { if (!get_prwv_anc(&anc_SP,prwv_input,prwv_in[SP_INDEX],SP_INDEX)) EXIT_ERROR("Can't get PRWV SP data","main"); if (!get_prwv_anc(&anc_WV,prwv_input,prwv_in[WV_INDEX],WV_INDEX)) EXIT_ERROR("Can't get PRWV WV data","main"); if (!get_prwv_anc(&anc_ATEMP,prwv_input,prwv_in[ATEMP_INDEX], ATEMP_INDEX)) EXIT_ERROR("Can't get PRWV ATEMP data","main"); if (!no_ozone_file) if (!get_ozon_anc(&anc_O3,ozon_input,ozon_in,OZ_INDEX)) EXIT_ERROR("Can't get OZONE data","main"); } else if ( param->num_ncep_files > 0 ) { anc_O3.data[0]=NULL; anc_O3.data[1]=NULL; anc_O3.data[2]=NULL; anc_O3.data[3]=NULL; anc_O3.nblayers=4; anc_O3.timeres=6; strcpy (anc_O3.source, "N/A"); strcpy(anc_O3.filename[0],param->ncep_file_name[0]); strcpy(anc_O3.filename[1],param->ncep_file_name[1]); strcpy(anc_O3.filename[2],param->ncep_file_name[2]); strcpy(anc_O3.filename[3],param->ncep_file_name[3]); if (read_grib_anc(&anc_O3,TYPE_OZONE_DATA)) EXIT_ERROR("Can't read NCEP Ozone data","main"); anc_WV.data[0]=NULL; anc_WV.data[1]=NULL; anc_WV.data[2]=NULL; anc_WV.data[3]=NULL; anc_WV.nblayers=4; anc_WV.timeres=6; strcpy (anc_WV.source, "N/A"); strcpy(anc_WV.filename[0],param->ncep_file_name[0]); strcpy(anc_WV.filename[1],param->ncep_file_name[1]); strcpy(anc_WV.filename[2],param->ncep_file_name[2]); strcpy(anc_WV.filename[3],param->ncep_file_name[3]); if (read_grib_anc(&anc_WV,TYPE_WV_DATA)) 
EXIT_ERROR("Can't read NCEP WV data","main"); anc_SP.data[0]=NULL; anc_SP.data[1]=NULL; anc_SP.data[2]=NULL; anc_SP.data[3]=NULL; anc_SP.nblayers=4; anc_SP.timeres=6; strcpy (anc_SP.source, "N/A"); strcpy(anc_SP.filename[0],param->ncep_file_name[0]); strcpy(anc_SP.filename[1],param->ncep_file_name[1]); strcpy(anc_SP.filename[2],param->ncep_file_name[2]); strcpy(anc_SP.filename[3],param->ncep_file_name[3]); if (read_grib_anc(&anc_SP,TYPE_SP_DATA)) EXIT_ERROR("Can't read NCEP SP data","main"); anc_ATEMP.data[0]=NULL; anc_ATEMP.data[1]=NULL; anc_ATEMP.data[2]=NULL; anc_ATEMP.data[3]=NULL; anc_ATEMP.nblayers=4; anc_ATEMP.timeres=6; strcpy (anc_ATEMP.source, "N/A"); strcpy(anc_ATEMP.filename[0],param->ncep_file_name[0]); strcpy(anc_ATEMP.filename[1],param->ncep_file_name[1]); strcpy(anc_ATEMP.filename[2],param->ncep_file_name[2]); strcpy(anc_ATEMP.filename[3],param->ncep_file_name[3]); if (read_grib_anc(&anc_ATEMP,TYPE_ATEMP_DATA)) EXIT_ERROR("Can't read NCEP SP data","main"); } else { EXIT_ERROR("No input NCEP or PRWV data specified","main"); } /* Convert the units */ /* convert Pascals into millibars (divide by 100) */ for (i=0;i<anc_SP.nblayers;i++) for (j=0;j<anc_SP.nbrows*anc_SP.nbcols;j++) anc_SP.data[i][j] *= 0.01; /* convert original PRWV kg/m2 into g/cm2 (divide by 10) */ for (i=0;i<anc_WV.nblayers;i++) for (j=0;j<anc_WV.nbrows*anc_WV.nbcols;j++) anc_WV.data[i][j] *= 0.1; /* convert O3 to cm-atm (divide by 1000) */ if (!no_ozone_file) { for (i=0;i<anc_O3.nblayers;i++) for (j=0;j<anc_O3.nbrows*anc_O3.nbcols;j++) anc_O3.data[i][j] *= 0.001; } /* read DEM file */ dem_name= (char*)(param->dem_flag ? 
param->dem_file : DEMFILE ); /* Open file for SD access */ sds_file_id = SDstart((char *)dem_name, DFACC_RDONLY); if (sds_file_id == HDF_ERROR) { EXIT_ERROR("opening dem_file", "OpenDem"); } sds_index=0; sds_id= SDselect(sds_file_id,sds_index); status= SDgetinfo(sds_id, sds_name, &rank, dim_sizes, &data_type,&n_attrs); start[0]=0; start[1]=0; edges[0]=3600; /* number of lines in the DEM data */ edges[1]=7200; /* number of samples in the DEM data */ stride[0]=1; stride[1]=1; dem_array=(short *)malloc(DEM_NBLAT*DEM_NBLON*sizeof(short)); status=SDreaddata(sds_id,start, stride, edges,dem_array); if (status != 0 ) { printf("Fatal error DEM file not read\n"); exit(EXIT_FAILURE); } dem_available=1; /* Print the ancillary metadata info */ if ( debug_flag ) { print_anc_data(&anc_SP,"SP_DATA"); print_anc_data(&anc_WV,"WV_DATA"); print_anc_data(&anc_ATEMP,"ATEMP_DATA"); if (!no_ozone_file) print_anc_data(&anc_O3,"OZONE_DATA"); } print_anc_data(&anc_O3,"OZONE_DATA"); /**** Get center lat lon and deviation from true north ****/ img.l=input->size.l/2.; img.s=input->size.s/2.; img.is_fill=false; if (!from_space(space, &img, &geo)) EXIT_ERROR("mapping from space (0)", "main"); center_lat=geo.lat * DEG; center_lon=geo.lon * DEG; printf ("(y0,x0)=(%d,%d) (lat0,lon0)=(%f,%f)\n", (int)img.l,(int)img.s,(float)(geo.lat * DEG),(float)(geo.lon * DEG)); delta_y=img.l; delta_x=img.s; img.l=input->size.l/2.-100.; img.s=input->size.s/2.; img.is_fill=false; if (!from_space(space, &img, &geo)) EXIT_ERROR("mapping from space (0)", "main"); geo.lon=center_lon*RAD; geo.is_fill=false; if (!to_space(space, &geo, &img)) EXIT_ERROR("mapping to space (0)", "main"); delta_y = delta_y - img.l; delta_x = img.s - delta_x; adjust_north=(float)(atan(delta_x/delta_y)*DEG); printf("True North adjustment = %f\n",adjust_north); #ifdef SAVE_6S_RESULTS if (read_6S_results_from_file(SIXS_RESULTS_FILENAME,&sixs_tables)) { #endif /**** Run 6S and compute atmcor params ****/ /* printf ("DEBUG: Interpolating WV at 
scene center ...\n"); */ interpol_spatial_anc(&anc_WV,center_lat,center_lon,tmpflt_arr); tmpint=(int)(scene_gmt/anc_WV.timeres); if (tmpint>=(anc_WV.nblayers-1)) tmpint=anc_WV.nblayers-2; coef=(double)(scene_gmt-anc_WV.time[tmpint])/anc_WV.timeres; sixs_tables.uwv=(1.-coef)*tmpflt_arr[tmpint]+coef*tmpflt_arr[tmpint+1]; if (!no_ozone_file) { /* printf ("DEBUG: Interpolating ozone at scene center ...\n"); */ interpol_spatial_anc(&anc_O3,center_lat,center_lon,tmpflt_arr); tmpint=(int)(scene_gmt/anc_O3.timeres); if ( anc_O3.nblayers> 1 ){ if (tmpint>=(anc_O3.nblayers-1))tmpint=anc_O3.nblayers-2; coef=(double)(scene_gmt-anc_O3.time[tmpint])/anc_O3.timeres; sixs_tables.uoz=(1.-coef)*tmpflt_arr[tmpint]+ coef*tmpflt_arr[tmpint+1]; } else { sixs_tables.uoz=tmpflt_arr[tmpint]; } } else { jday=(short)input->meta.acq_date.doy; sixs_tables.uoz=calcuoz(jday,(float)center_lat); } sixs_tables.target_alt=0.; /* target altitude in km (sea level) */ sixs_tables.sza=input->meta.sun_zen*DEG; sixs_tables.phi=corrected_sun_az; sixs_tables.vza=0.; sixs_tables.month=9; sixs_tables.day=15; sixs_tables.srefl=0.14; /* printf ("Center : Lat = %7.2f Lon = %7.2f \n",center_lat,center_lon); printf (" O3 = %7.2f SP = %7.2f WV = %7.2f\n",\ sixs_tables.uoz,tmpflt,sixs_tables.uwv); */ switch (input->meta.inst) { case INST_TM: sixs_tables.Inst=SIXS_INST_TM; break; case INST_ETM: sixs_tables.Inst=SIXS_INST_ETM; break; default: EXIT_ERROR("Unknown Instrument", "main"); } create_6S_tables(&sixs_tables, &input->meta); #ifdef SAVE_6S_RESULTS write_6S_results_to_file(SIXS_RESULTS_FILENAME,&sixs_tables); } #endif /*** interpolate ancillary data for AR grid cells ***/ img.is_fill=false; sum_spres_anc=0.; sum_spres_dem=0.; nb_spres_anc=0; nb_spres_dem=0; for (il_ar = 0; il_ar < lut->ar_size.l;il_ar++) { img.l=il_ar*lut->ar_region_size.l+lut->ar_region_size.l/2.; for (is_ar=0;is_ar < lut->ar_size.s; is_ar++) { img.s=is_ar*lut->ar_region_size.s+lut->ar_region_size.s/2.; if (!from_space(space, &img, &geo)) 
EXIT_ERROR("mapping from space (1)", "main"); ar_gridcell.lat[il_ar*lut->ar_size.s+is_ar]=geo.lat * DEG; ar_gridcell.lon[il_ar*lut->ar_size.s+is_ar]=geo.lon * DEG; ar_gridcell.sun_zen[il_ar*lut->ar_size.s+is_ar]= input->meta.sun_zen*DEG; ar_gridcell.view_zen[il_ar*lut->ar_size.s+is_ar]=3.5; ar_gridcell.rel_az[il_ar*lut->ar_size.s+is_ar]=corrected_sun_az; interpol_spatial_anc(&anc_WV, ar_gridcell.lat[il_ar*lut->ar_size.s+is_ar], ar_gridcell.lon[il_ar*lut->ar_size.s+is_ar],tmpflt_arr); tmpint=(int)(scene_gmt/anc_WV.timeres); if (tmpint>=(anc_WV.nblayers-1)) tmpint=anc_WV.nblayers-2; coef=(double)(scene_gmt-anc_WV.time[tmpint])/anc_WV.timeres; ar_gridcell.wv[il_ar*lut->ar_size.s+is_ar]=(1.-coef)* tmpflt_arr[tmpint]+coef*tmpflt_arr[tmpint+1]; if (!no_ozone_file) { interpol_spatial_anc(&anc_O3, ar_gridcell.lat[il_ar*lut->ar_size.s+is_ar], ar_gridcell.lon[il_ar*lut->ar_size.s+is_ar],tmpflt_arr); tmpint=(int)(scene_gmt/anc_O3.timeres); if ( anc_O3.nblayers> 1 ){ if (tmpint>=(anc_O3.nblayers-1)) tmpint=anc_O3.nblayers-2; coef=(double)(scene_gmt-anc_O3.time[tmpint])/anc_O3.timeres; ar_gridcell.ozone[il_ar*lut->ar_size.s+is_ar]=(1.-coef)* tmpflt_arr[tmpint]+coef*tmpflt_arr[tmpint+1]; } else { ar_gridcell.ozone[il_ar*lut->ar_size.s+is_ar]= tmpflt_arr[tmpint]; } } else { jday=(short)input->meta.acq_date.doy; ar_gridcell.ozone[il_ar*lut->ar_size.s+is_ar]=calcuoz(jday, (float)ar_gridcell.lat[il_ar*lut->ar_size.s+is_ar]); } interpol_spatial_anc(&anc_SP, ar_gridcell.lat[il_ar*lut->ar_size.s+is_ar], ar_gridcell.lon[il_ar*lut->ar_size.s+is_ar],tmpflt_arr); tmpint=(int)(scene_gmt/anc_SP.timeres); if (tmpint>=(anc_SP.nblayers-1)) tmpint=anc_SP.nblayers-2; coef=(double)(scene_gmt-anc_SP.time[tmpint])/anc_SP.timeres; ar_gridcell.spres[il_ar*lut->ar_size.s+is_ar]=(1.-coef)* tmpflt_arr[tmpint]+coef*tmpflt_arr[tmpint+1]; if (ar_gridcell.spres[il_ar*lut->ar_size.s+is_ar] > 0) { sum_spres_anc += ar_gridcell.spres[il_ar*lut->ar_size.s+is_ar]; nb_spres_anc++; } if (dem_available) { 
ar_gridcell.spres_dem[il_ar*lut->ar_size.s+is_ar]= get_dem_spres(dem_array, ar_gridcell.lat[il_ar*lut->ar_size.s+is_ar], ar_gridcell.lon[il_ar*lut->ar_size.s+is_ar]); if (ar_gridcell.spres_dem[il_ar*lut->ar_size.s+is_ar] > 0) { sum_spres_dem += ar_gridcell.spres_dem[il_ar*lut->ar_size.s+is_ar]; nb_spres_dem++; } } } /* for is_ar */ } /* for il_ar */ if (dem_available) { for (il_ar = 0; il_ar < lut->ar_size.l;il_ar++) for (is_ar=0;is_ar < lut->ar_size.s; is_ar++) if ((ar_gridcell.spres[il_ar*lut->ar_size.s+is_ar] > 0)&& (ar_gridcell.spres_dem[il_ar*lut->ar_size.s+is_ar] > 0)) ar_gridcell.spres[il_ar*lut->ar_size.s+is_ar]= ar_gridcell.spres_dem[il_ar*lut->ar_size.s+is_ar]* ar_gridcell.spres[il_ar*lut->ar_size.s+is_ar]/1013.; } /* Compute atmospheric coefs for the whole scene with aot550=0.01 for use in internal cloud screening : NAZMI */ nbpts=lut->ar_size.l*lut->ar_size.s; /*** Allocate memory for atmos_coeff ***/ if (allocate_mem_atmos_coeff(nbpts,&atmos_coef)) EXIT_ERROR("Allocating memory for atmos_coef", "main"); printf("Compute Atmos Params with aot550 = 0.01\n"); fflush(stdout); update_atmos_coefs(&atmos_coef, &ar_gridcell, &sixs_tables, line_ar, lut, input->nband, 1); /* Read input first time and compute clear pixels stats for internal cloud screening */ /* allocate memory for cld_diags structure and clear sum and nb of obs */ if (allocate_cld_diags(&cld_diags,CLDDIAGS_CELLHEIGHT_5KM, CLDDIAGS_CELLWIDTH_5KM, input->size.l, input->size.s)) { EXIT_ERROR("couldn't allocate memory from cld_diags","main"); } /* Screen the clouds */ for (il = 0; il < input->size.l; il++) { if (!(il%100)) { printf("First pass cloud screening for line %d\r",il); fflush(stdout); } /* Read each input band */ for (ib = 0; ib < input->nband; ib++) { if (!GetInputLine(input, ib, il, line_in[0][ib])) EXIT_ERROR("reading input data for a line (b)", "main"); } if (!GetInputQALine(input, il, qa_line[0])) EXIT_ERROR("reading input data for qa_line (1)", "main"); if (param->thermal_band) { if 
(!GetInputLine(input_b6, 0, il, b6_line[0])) EXIT_ERROR("reading input data for b6_line (1)", "main"); } tmpint = (int)(scene_gmt / anc_ATEMP.timeres); if (tmpint >= anc_ATEMP.nblayers - 1) tmpint = anc_ATEMP.nblayers - 2; coef = (double)(scene_gmt - anc_ATEMP.time[tmpint]) / anc_ATEMP.timeres; img.is_fill = false; img.l = il; #ifdef _OPENMP #pragma omp parallel for private (is, geo, flat, flon, tmpflt_arr) firstprivate (img, atemp_line) #endif for (is = 0; is < input->size.s; is++) { /* Get the geolocation info for this pixel */ img.s = is; if (!from_space (space, &img, &geo)) EXIT_ERROR("mapping from space (2)", "main"); flat = geo.lat * DEG; flon = geo.lon * DEG; /* Interpolate the anciliary data for this lat/long, then pull the information for the scene center time and adjust */ interpol_spatial_anc (&anc_ATEMP, flat, flon, tmpflt_arr); atemp_line[is] = (1. - coef) * tmpflt_arr[tmpint] + coef * tmpflt_arr[tmpint+1]; } /* Run Cld Screening Pass1 and compute stats. This cloud detection function contains statistics gathering that needs to be in a critical section for multi-threading. 
*/ if (param->thermal_band) if (!cloud_detection_pass1 (lut, input->size.s, il, line_in[0], qa_line[0], b6_line[0], atemp_line, &cld_diags)) EXIT_ERROR("running cloud detection pass 1", "main"); } /* end for il */ printf ("\n"); if (param->thermal_band) { for (il = 0; il < cld_diags.nbrows; il++) { if (!(il%100)) { printf("Second pass cloud screening for line %d\r",il); fflush(stdout); } tmpint=(int)(scene_gmt / anc_ATEMP.timeres); if (tmpint >= anc_ATEMP.nblayers - 1) tmpint = anc_ATEMP.nblayers - 2; coef =(double)(scene_gmt - anc_ATEMP.time[tmpint]) / anc_ATEMP.timeres; /* Note the right shift by 1 is a faster way of divide by 2 */ img.is_fill = false; img.l = il * cld_diags.cellheight + (cld_diags.cellheight >> 1); if (img.l >= input->size.l) img.l = input->size.l-1; for (is = 0; is < cld_diags.nbcols; is++) { img.s = is * cld_diags.cellwidth + (cld_diags.cellwidth >> 1); if (img.s >= input->size.s) img.s = input->size.s-1; if (!from_space (space, &img, &geo)) EXIT_ERROR("mapping from space (3)", "main"); flat=geo.lat * DEG; flon=geo.lon * DEG; interpol_spatial_anc(&anc_ATEMP,flat,flon,tmpflt_arr); cld_diags.airtemp_2m[il][is] = (1.-coef)*tmpflt_arr[tmpint] + coef * tmpflt_arr[tmpint+1]; if (cld_diags.nb_t6_clear[il][is] > 0) { sum_value=cld_diags.avg_t6_clear[il][is]; sumsq_value=cld_diags.std_t6_clear[il][is]; cld_diags.avg_t6_clear[il][is] = sum_value/cld_diags.nb_t6_clear[il][is]; if (cld_diags.nb_t6_clear[il][is] > 1) { cld_diags.std_t6_clear[il][is] = (sumsq_value-(sum_value*sum_value)/cld_diags.nb_t6_clear[il][is])/(cld_diags.nb_t6_clear[il][is]-1); cld_diags.std_t6_clear[il][is]=sqrt(fabs(cld_diags.std_t6_clear[il][is])); } else cld_diags.std_t6_clear[il][is] = 0.; sum_value=cld_diags.avg_b7_clear[il][is]; sumsq_value=cld_diags.std_b7_clear[il][is]; cld_diags.avg_b7_clear[il][is] = sum_value/cld_diags.nb_t6_clear[il][is]; if (cld_diags.nb_t6_clear[il][is] > 1) { cld_diags.std_b7_clear[il][is] = 
(sumsq_value-(sum_value*sum_value)/cld_diags.nb_t6_clear[il][is])/(cld_diags.nb_t6_clear[il][is]-1); cld_diags.std_b7_clear[il][is]=sqrt(fabs(cld_diags.std_b7_clear[il][is])); } else cld_diags.std_b7_clear[il][is]=0; } else { cld_diags.avg_t6_clear[il][is]=-9999.; cld_diags.avg_b7_clear[il][is]=-9999.; cld_diags.std_t6_clear[il][is]=-9999.; cld_diags.std_b7_clear[il][is]=-9999.; } } /* end for is */ } /* end for il */ fill_cld_diags(&cld_diags); #ifdef DEBUG_CLD for (il=0;il<cld_diags.nbrows;il++) for (is=0;is<cld_diags.nbcols;is++) if (fd_cld_diags != NULL) fprintf(fd_cld_diags,"%d %d %d %f %f %f %f %f\n",il,is,cld_diags.nb_t6_clear[il][is],cld_diags.airtemp_2m[il][is],cld_diags.avg_t6_clear[il][is],cld_diags.std_t6_clear[il][is],cld_diags.avg_b7_clear[il][is],cld_diags.std_b7_clear[il][is]); fclose(fd_cld_diags); #endif } /* end if thermal band */ printf ("\n"); /*** Create dark target temporary file ***/ strcpy(tmpfilename, "temporary_dark_target_XXXXXX"); if ((tmpid = mkstemp (tmpfilename)) < 1) EXIT_ERROR("creating filename for dark target temporary file", "main"); close(tmpid); if ((fdtmp=fopen(tmpfilename,"w"))==NULL) EXIT_ERROR("creating dark target temporary file", "main"); /* Read input second time and create cloud and cloud shadow masks */ ptr_rot_cld[0]=rot_cld[0]; ptr_rot_cld[1]=rot_cld[1]; ptr_rot_cld[2]=rot_cld[2]; for (il_start = 0, il_ar = 0; il_start < input->size.l; il_start += lut->ar_region_size.l, il_ar++) { ar_gridcell.line_lat=&(ar_gridcell.lat[il_ar*lut->ar_size.s]); ar_gridcell.line_lon=&(ar_gridcell.lon[il_ar*lut->ar_size.s]); ar_gridcell.line_sun_zen=&(ar_gridcell.sun_zen[il_ar*lut->ar_size.s]); ar_gridcell.line_view_zen=&(ar_gridcell.view_zen[il_ar*lut->ar_size.s]); ar_gridcell.line_rel_az=&(ar_gridcell.rel_az[il_ar*lut->ar_size.s]); ar_gridcell.line_wv=&(ar_gridcell.wv[il_ar*lut->ar_size.s]); ar_gridcell.line_spres=&(ar_gridcell.spres[il_ar*lut->ar_size.s]); ar_gridcell.line_ozone=&(ar_gridcell.ozone[il_ar*lut->ar_size.s]); 
ar_gridcell.line_spres_dem=&(ar_gridcell.spres[il_ar*lut->ar_size.s]); il_end = il_start + lut->ar_region_size.l - 1; if (il_end >= input->size.l) il_end = input->size.l - 1; /* Read each input band for each line in region */ for (il = il_start; il < (il_end + 1); il++) { il_region = il - il_start; for (ib = 0; ib < input->nband; ib++) { if (!GetInputLine(input, ib, il, line_in[il_region][ib])) EXIT_ERROR("reading input data for a line (a)", "main"); } if (!GetInputQALine(input, il, qa_line[il_region])) EXIT_ERROR("reading input data for qa_line (2)", "main"); if (param->thermal_band) { if (!GetInputLine(input_b6, 0, il, b6_line[il_region])) EXIT_ERROR("reading input data for b6_line (2)", "main"); /* Run Cld Screening Pass2 */ if (!cloud_detection_pass2(lut, input->size.s, il, line_in[il_region], qa_line[il_region], b6_line[il_region], &cld_diags, ptr_rot_cld[1][il_region])) EXIT_ERROR("running cloud detection pass 2", "main"); } else { if (!cloud_detection_pass2(lut, input->size.s, il, line_in[il_region], qa_line[il_region], NULL, &cld_diags, ptr_rot_cld[1][il_region])) EXIT_ERROR("running cloud detection pass 2", "main"); } } /* end for il */ if (param->thermal_band) { /* Cloud Mask Dilation : 5 pixels */ if (!dilate_cloud_mask(lut, input->size.s, ptr_rot_cld, 5)) EXIT_ERROR("running cloud mask dilation", "main"); /* Cloud shadow */ cast_cloud_shadow(lut, input->size.s, il_start, line_in, b6_line, &cld_diags,ptr_rot_cld,&ar_gridcell, space_def.pixel_size[0], adjust_north); /* Dilate Cloud shadow */ dilate_shadow_mask(lut, input->size.s, ptr_rot_cld, 5); } /*** Save cloud and cloud shadow in temporary file ***/ if (il_ar > 0) if (fwrite(ptr_rot_cld[0][0],lut->ar_region_size.l*input->size.s,1, fdtmp) != 1) EXIT_ERROR("writing dark target to temporary file", "main"); ptr_tmp_cld=ptr_rot_cld[0]; ptr_rot_cld[0]=ptr_rot_cld[1]; ptr_rot_cld[1]=ptr_rot_cld[2]; ptr_rot_cld[2]=ptr_tmp_cld; for (i=0;i<lut->ar_region_size.l;i++) 
memset(&ptr_rot_cld[2][i][0],0,input->size.s); } /* end for il_start */ /** Last Block **/ dilate_shadow_mask(lut, input->size.s, ptr_rot_cld, 5); if (fwrite(ptr_rot_cld[0][0],lut->ar_region_size.l*input->size.s,1,fdtmp) != 1) EXIT_ERROR("writing dark target to temporary file", "main"); fclose(fdtmp); /* Done with the cloud diagnostics */ free_cld_diags (&cld_diags); /*** Open temporary file for read and write ***/ if ((fdtmp=fopen(tmpfilename,"r+"))==NULL) EXIT_ERROR("opening dark target temporary file (r+)", "main"); /* Read input second time and compute the aerosol for each region */ for (il_start = 0, il_ar = 0; il_start < input->size.l; il_start += lut->ar_region_size.l, il_ar++) { ar_gridcell.line_lat=&(ar_gridcell.lat[il_ar*lut->ar_size.s]); ar_gridcell.line_lon=&(ar_gridcell.lon[il_ar*lut->ar_size.s]); ar_gridcell.line_sun_zen=&(ar_gridcell.sun_zen[il_ar*lut->ar_size.s]); ar_gridcell.line_view_zen=&(ar_gridcell.view_zen[il_ar*lut->ar_size.s]); ar_gridcell.line_rel_az=&(ar_gridcell.rel_az[il_ar*lut->ar_size.s]); ar_gridcell.line_wv=&(ar_gridcell.wv[il_ar*lut->ar_size.s]); ar_gridcell.line_spres=&(ar_gridcell.spres[il_ar*lut->ar_size.s]); ar_gridcell.line_ozone=&(ar_gridcell.ozone[il_ar*lut->ar_size.s]); ar_gridcell.line_spres_dem=&(ar_gridcell.spres[il_ar*lut->ar_size.s]); il_end = il_start + lut->ar_region_size.l - 1; if (il_end >= input->size.l) il_end = input->size.l - 1; if (fseek(fdtmp,(long)(il_ar*lut->ar_region_size.l*input->size.s), SEEK_SET)) EXIT_ERROR("seeking in temporary file (r)", "main"); if (fread(ddv_line[0],lut->ar_region_size.l*input->size.s,1,fdtmp)!=1) EXIT_ERROR("reading dark target to temporary file", "main"); /* Read each input band for each line in region */ for (il = il_start, il_region = 0; il < (il_end + 1); il++, il_region++) { for (ib = 0; ib < input->nband; ib++) { if (!GetInputLine(input, ib, il, line_in[il_region][ib])) EXIT_ERROR("reading input data for a line (a)", "main"); } } /* end for il */ /* Compute the aerosol for 
the regions */ #ifdef DEBUG_AR diags_il_ar=il_ar; #endif if (!Ar(il_ar,lut, &input->size, line_in, ddv_line, line_ar[il_ar], &ar_stats, &ar_gridcell, &sixs_tables)) EXIT_ERROR("computing aerosol", "main"); /*** Save dark target map in temporary file ***/ if (fseek(fdtmp,il_ar*lut->ar_region_size.l*input->size.s,SEEK_SET)) EXIT_ERROR("seeking in temporary file (w)", "main"); if (fwrite(ddv_line[0],lut->ar_region_size.l*input->size.s,1,fdtmp)!=1) EXIT_ERROR("writing dark target to temporary file", "main"); } /* end for il_start */ printf("\n"); fclose(fdtmp); #ifdef DEBUG_AR fclose(fd_ar_diags); #endif /*** Fill Gaps in the coarse resolution aerosol product for bands 1(0), 2(1) and 3(2) ***/ Fill_Ar_Gaps(lut, line_ar, 0); /* Compute atmospheric coeffs for the whole scene using retrieved aot */ nbpts=lut->ar_size.l*lut->ar_size.s; printf("Compute Atmos Params\n"); fflush(stdout); #ifdef NO_AEROSOL_CORRECTION update_atmos_coefs(&atmos_coef,&ar_gridcell, &sixs_tables,line_ar, lut, input->nband, 1); #else update_atmos_coefs(&atmos_coef,&ar_gridcell, &sixs_tables,line_ar, lut, input->nband, 0); /*Eric COMMENTED TO PERFORM NO CORRECTION*/ #endif /* Re-read input and compute surface reflectance */ /*** Open temporary file for read ***/ if ((fdtmp=fopen(tmpfilename,"r"))==NULL) EXIT_ERROR("opening dark target temporary file", "main"); for (il = 0; il < input->size.l; il++) { if (!(il%100)) { printf("Processing surface reflectance for line %d\r",il); fflush(stdout); } /* Re-read each input band */ for (ib = 0; ib < input->nband; ib++) { if (!GetInputLine(input, ib, il, line_in[0][ib])) EXIT_ERROR("reading input data for a line (b)", "main"); } if (!GetInputLine(input_b6, 0, il, b6_line[0])) EXIT_ERROR("reading input data for b6_line (1)", "main"); /* Compute the surface reflectance */ if (!Sr(lut, input->size.s, il, line_in[0], line_out, &sr_stats)) EXIT_ERROR("computing surface reflectance for a line", "main"); /*** Read line from dark target temporary file ***/ if 
(fread(ddv_line[0],input->size.s,1,fdtmp)!=1) EXIT_ERROR("reading line from dark target temporary file", "main"); loc.l=il; i_aot=il/lut->ar_region_size.l; t6s_seuil=280.+(1000.*0.01); for (is=0;is<input->size.s;is++) { loc.s=is; j_aot=is/lut->ar_region_size.s; /* Initialize QA band to off */ line_out[lut->nband+CLOUD][is] = QA_OFF; /* Determine if this is a fill pixel -- mark as fill if any reflective band for this pixel is fill */ refl_is_fill = false; for (ib = 0; ib < input->nband; ib++) { if (line_in[0][ib][is] == lut->in_fill) if (!refl_is_fill) refl_is_fill = true; } /* Process QA for each pixel */ if (!refl_is_fill) { /* AOT / opacity */ ArInterp(lut, &loc, line_ar, &inter_aot); line_out[lut->nband+ATMOS_OPACITY][is] = inter_aot; /* QA is written out in the cloud band as a bit-packed product (16-bit). We will use QA values as-is and no further post-processing QA step will be implemented. We want the QA to reflect the cloud, etc. status that was used in the aerosol and surface reflectance computations. We are not interested in post-processing of the QA information, as there are better QA products available. 
*/ if (ddv_line[0][is]&0x01) line_out[lut->nband+CLOUD][is] |= (1 << DDV_BIT); if (ddv_line[0][is]&0x04) line_out[lut->nband+CLOUD][is] |= (1 << ADJ_CLOUD_BIT); if (!(ddv_line[0][is]&0x10)) /* if water, turn on */ line_out[lut->nband+CLOUD][is] |= (1 << LAND_WATER_BIT); if (ddv_line[0][is]&0x20) line_out[lut->nband+CLOUD][is] |= (1 << CLOUD_BIT); if (ddv_line[0][is]&0x40) line_out[lut->nband+CLOUD][is] |= (1 << CLOUD_SHADOW_BIT); if (ddv_line[0][is]&0x80) line_out[lut->nband+CLOUD][is] |= (1 << SNOW_BIT); } else { line_out[lut->nband][is]=lut->aerosol_fill; } } /* for is */ /* Write each output band */ for (ib = 0; ib < output->nband_out; ib++) { if (!PutOutputLine(output, ib, il, line_out[ib])) EXIT_ERROR("writing output data for a line", "main"); } } /* for il */ printf("\n"); fclose(fdtmp); unlink(tmpfilename); /* Print the statistics, skip bands that don't exist */ printf(" total pixels %ld\n", ((long)input->size.l * (long)input->size.s)); printf(" aerosol coarse nfill %ld min %d max %d\n", ar_stats.nfill, ar_stats.ar_min, ar_stats.ar_max); for (ib = 0; ib < lut->nband; ib++) { if (output->metadata.band[ib].name != NULL) printf(" sr %s nfill %ld nsatu %ld nout_range %ld min %d " "max %d\n", output->metadata.band[ib].name, sr_stats.nfill[ib], sr_stats.nsatu[ib], sr_stats.nout_range[ib], sr_stats.sr_min[ib], sr_stats.sr_max[ib]); } /* Close input files */ if (!CloseInput(input)) EXIT_ERROR("closing input file", "main"); if (!CloseOutput(output)) EXIT_ERROR("closing input file", "main"); /* Write the ENVI header for reflectance files */ for (ib = 0; ib < output->nband_out; ib++) { /* Create the ENVI header file this band */ if (create_envi_struct (&output->metadata.band[ib], &xml_metadata.global, &envi_hdr) != SUCCESS) EXIT_ERROR("Creating the ENVI header structure for this file.", "main"); /* Write the ENVI header */ strcpy (envi_file, output->metadata.band[ib].file_name); cptr = strchr (envi_file, '.'); strcpy (cptr, ".hdr"); if (write_envi_hdr (envi_file, 
&envi_hdr) != SUCCESS) EXIT_ERROR("Writing the ENVI header file.", "main"); } /* Append the reflective and thermal bands to the XML file */ if (append_metadata (output->nband_out, output->metadata.band, param->input_xml_file_name) != SUCCESS) EXIT_ERROR("appending surfance reflectance and QA bands", "main"); /* Free the metadata structure */ free_metadata (&xml_metadata); /* Free memory */ free_mem_atmos_coeff(&atmos_coef); if (!FreeInput(input)) EXIT_ERROR("freeing input file stucture", "main"); if (!FreeInput(input_b6)) EXIT_ERROR("freeing input_b6 file stucture", "main"); if (!FreeLut(lut)) EXIT_ERROR("freeing lut file stucture", "main"); if (!FreeOutput(output)) EXIT_ERROR("freeing output file stucture", "main"); free(space); free(line_out[0]); free(line_ar[0][0]); free(line_ar[0]); free(line_ar); free(line_in[0][0]); free(line_in[0]); free(line_in); free(qa_line[0]); free(qa_line); if (param->thermal_band) { free(b6_line[0]); free(b6_line); } free(ddv_line[0]); free(ddv_line); free(rot_cld[0][0]); free(rot_cld[0]); free(ar_gridcell.lat); free(ar_gridcell.lon); free(ar_gridcell.sun_zen); free(ar_gridcell.view_zen); free(ar_gridcell.rel_az); free(ar_gridcell.wv); free(ar_gridcell.spres); free(ar_gridcell.ozone); for (ifree=0; ifree<(param->num_ncep_files>0?4:1); ifree++) { if (anc_O3.data[ifree]!=NULL) free(anc_O3.data[ifree]); if (anc_WV.data[ifree]!=NULL) free(anc_WV.data[ifree]); if (anc_SP.data[ifree]!=NULL) free(anc_SP.data[ifree]); } if (dem_available) free(dem_array); if (!FreeParam(param)) EXIT_ERROR("freeing parameter stucture", "main"); /* All done */ printf ("lndsr complete.\n"); return (EXIT_SUCCESS); } int allocate_mem_atmos_coeff(int nbpts,atmos_t *atmos_coef) { int ib; if ((atmos_coef->computed=(int *)malloc(nbpts*sizeof(int)))==NULL) return -1; for (ib=0;ib<7;ib++) { if ((atmos_coef->tgOG[ib]=(float *)malloc(nbpts*sizeof(float)))==NULL) return -1; if ((atmos_coef->tgH2O[ib]=(float *)malloc(nbpts*sizeof(float)))==NULL) return -1; if 
((atmos_coef->td_ra[ib]=(float *)malloc(nbpts*sizeof(float)))==NULL) return -1;
    /* (continuation of allocate_mem_atmos_coeff: one float array per band for
       each atmospheric coefficient; names presumably follow 6S conventions --
       td/tu = down/up transmittance, ra = Rayleigh+aerosol, r = Rayleigh only,
       da = aerosol, S = spherical albedo, rho = reflectance -- TODO confirm) */
    if ((atmos_coef->tu_ra[ib]=(float *)malloc(nbpts*sizeof(float)))==NULL) return -1;
    if ((atmos_coef->rho_mol[ib]=(float *)malloc(nbpts*sizeof(float)))==NULL) return -1;
    if ((atmos_coef->rho_ra[ib]=(float *)malloc(nbpts*sizeof(float)))==NULL) return -1;
    if ((atmos_coef->td_da[ib]=(float *)malloc(nbpts*sizeof(float)))==NULL) return -1;
    if ((atmos_coef->tu_da[ib]=(float *)malloc(nbpts*sizeof(float)))==NULL) return -1;
    if ((atmos_coef->S_ra[ib]=(float *)malloc(nbpts*sizeof(float)))==NULL) return -1;
    if ((atmos_coef->td_r[ib]=(float *)malloc(nbpts*sizeof(float)))==NULL) return -1;
    if ((atmos_coef->tu_r[ib]=(float *)malloc(nbpts*sizeof(float)))==NULL) return -1;
    if ((atmos_coef->S_r[ib]=(float *)malloc(nbpts*sizeof(float)))==NULL) return -1;
    if ((atmos_coef->rho_r[ib]=(float *)malloc(nbpts*sizeof(float)))==NULL) return -1;
    /* NOTE(review): on a mid-loop malloc failure the arrays already allocated
       are not freed before returning -1; the caller appears to exit the
       process on failure, so this leak is benign -- confirm. */
  }
  return 0;
}

/* Release every per-band coefficient array plus the 'computed' flag array
   allocated by allocate_mem_atmos_coeff.  Pointers are not NULLed, so the
   structure must not be freed twice. */
int free_mem_atmos_coeff(atmos_t *atmos_coef)
{
  int ib;
  free(atmos_coef->computed);
  for(ib=0;ib<7;ib++) {
    free(atmos_coef->tgOG[ib]);
    free(atmos_coef->tgH2O[ib]);
    free(atmos_coef->td_ra[ib]);
    free(atmos_coef->tu_ra[ib]);
    free(atmos_coef->rho_mol[ib]);
    free(atmos_coef->rho_ra[ib]);
    free(atmos_coef->td_da[ib]);
    free(atmos_coef->tu_da[ib]);
    free(atmos_coef->S_ra[ib]);
    free(atmos_coef->td_r[ib]);
    free(atmos_coef->tu_r[ib]);
    free(atmos_coef->S_r[ib]);
    free(atmos_coef->rho_r[ib]);
  }
  return 0;
}

/******************************************************************************
!C
!Routine: calcuoz

!Description:  Gets ozone concentration for a particular day and
 latitude, interpolating if necessary.

!Revision History:
 Original version:    Nazmi Z El Saleous and Eric Vermote

!Input Parameters:
 jday       Julian day
 lat        latitude (in degrees)

!Output Parameters:
 uoz        Ozone concentration (in cm-atm)

!Return value:
 returns 0 (success)
         -1 (latitude is beyond coverage of data, and so the returned
             ozone value is an approximation).
!References and Credits:
 Data is from:
    LONDON J.,BOJKOV R.D.,OLTMANS S. AND KELLEY J.I.,1976,
    ATLAS OF THE GLOBAL DISTRIBUTION OF TOTAL OZONE .JULY 1957-JUNE 1967
    NCAR TECHNICAL NOTE, NCAR/TN/113+STR,PP276

!Developers:
      Nazmi Z El Saleous
      Eric Vermote
      University of Maryland / Dept. of Geography
      nazmi.elsaleous@gsfc.nasa.gov

!Design Notes:

!END
*******************************************************************************/
float calcuoz(short jday,float flat)
{
  float t,u,tmpf;
  int i1,i2,j1,j2,Minf,Msup,Latinf,Latsup;
  /*
     CREPARTITION ZONALE PAR BANDE DE 10 DEG DE LATITUDE A PARTIR DE 80 SUD
     (zonal repartition by 10-degree latitude band starting at 80 deg South)
      --- OZONE ---
     Monthly mean total ozone (cm-atm): 12 months x 17 latitude bands.
  */
  float oz[12][17] = {
    {.315,.320,.315,.305,.300,.280,.260,.240,.240,.240,.250,.280,.320,.350,.375,.380,.380},
    {.280,.300,.300,.300,.280,.270,.260,.240,.240,.240,.260,.300,.340,.380,.400,.420,.420},
    {.280,.280,.280,.280,.280,.260,.250,.240,.250,.250,.270,.300,.340,.400,.420,.440,.440},
    {.280,.280,.280,.280,.280,.260,.250,.250,.250,.260,.280,.300,.340,.380,.420,.430,.430},
    {.280,.290,.300,.300,.280,.270,.260,.250,.250,.260,.270,.300,.320,.360,.380,.400,.400},
    {.280,.300,.300,.305,.300,.280,.260,.250,.250,.260,.260,.280,.310,.330,.360,.370,.370},
    {.290,.300,.315,.320,.305,.280,.260,.250,.240,.240,.260,.270,.290,.310,.320,.320,.320},
    {.300,.310,.320,.325,.320,.300,.270,.260,.240,.240,.250,.260,.280,.290,.300,.300,.290},
    {.300,.320,.325,.335,.320,.300,.280,.260,.240,.240,.240,.260,.270,.280,.280,.280,.280},
    {.320,.340,.350,.345,.330,.300,.280,.260,.240,.240,.240,.260,.260,.280,.280,.280,.280},
    {.360,.360,.360,.340,.320,.300,.280,.260,.240,.240,.240,.260,.280,.300,.310,.310,.300},
    {.340,.350,.340,.320,.310,.280,.260,.250,.240,.240,.240,.260,.300,.320,.330,.340,.330}};
  /*
     C /Begin of interpolation/
     C /Find Neighbours/
     C /First loop for time/
  */
  if (fabs(flat)>=80.) {
    /* NOTE(review): outside the table's latitude coverage the header promises
       an approximate ozone value; tmpf is set to .270 but the function
       returns -1, so callers actually receive -1.0 as the ozone amount.
       Left as-is because callers may test for the -1 sentinel -- confirm. */
    tmpf=.270;
    return -1;
  }
  Minf= (int) ((jday-15.)/30.5);          /* month index below jday */
  if (jday < 15)
    Minf=Minf-1;
  Latinf=(int) (flat*0.1);                /* 10-degree latitude band index */
  if (flat < 0.)
    Latinf=Latinf-1;
  t=((jday-15.)-(30.5*Minf))/30.5;        /* time interpolation weight */
  u=(flat-10.*Latinf)*0.1;                /* latitude interpolation weight */
  Minf=(Minf+12)%12;                      /* wrap months into [0,11] */
  Msup=(Minf+13)%12;
  Latsup=Latinf+1;
  i1=Minf;
  j1=Latinf+8;                            /* shift: band 0 is 80 deg South */
  i2=Msup;
  j2=Latsup+8;
  /* Now Calculate Uo3 at the given point Xlat,Jjulien (bilinear) */
  tmpf=(1.-t)*(1.-u)*oz[i1][j1] + t*(1.-u)*oz[i2][j1] + t*u*oz[i2][j2]
     + (1.-t)*u*oz[i1][j2];
  return tmpf;
}

/* Surface pressure (hPa) at (lat,lon) from the global DEM: barometric
   formula with 8000 m scale height; -9999 DEM fill maps to sea level. */
float get_dem_spres(short *dem,float lat,float lon)
{
  int idem,jdem;
  float dem_spres;
  /* nearest DEM cell, clamped to the grid */
  idem=(int)((DEM_LATMAX-lat)/DEM_DLAT+0.5);
  if (idem<0)
    idem=0;
  if (idem >= DEM_NBLAT)
    idem=DEM_NBLAT-1;
  jdem=(int)((lon-DEM_LONMIN)/DEM_DLON+0.5);
  if (jdem<0)
    jdem=0;
  if (jdem >= DEM_NBLON)
    jdem=DEM_NBLON-1;
  if (dem[idem*DEM_NBLON+jdem]== -9999)
    dem_spres=1013;
  else
    dem_spres=1013.2*exp(-dem[idem*DEM_NBLON+jdem]/8000.);
  return dem_spres;
}

/* Recompute the per-gridcell atmospheric coefficients for the whole aerosol
   grid by delegating to update_gridcell_atmos_coefs for each cell. */
int update_atmos_coefs
(
  atmos_t *atmos_coef,
  Ar_gridcell_t *ar_gridcell,
  sixs_tables_t *sixs_tables,
  int ***line_ar,
  Lut_t *lut,
  int nband,
  int bkgd_aerosol
)
{
  int irow,icol;
  for (irow=0;irow<ar_gridcell->nbrows;irow++)
    for (icol=0;icol<ar_gridcell->nbcols;icol++)
      update_gridcell_atmos_coefs(irow,icol,atmos_coef,ar_gridcell,
        sixs_tables,line_ar[irow],lut,nband,bkgd_aerosol);
  return 0;
}

/* Fill atmos_coef for one aerosol grid cell: interpolate the 6S table in
   AOT@550nm, then apply a DEM/pressure-based Rayleigh correction.
   bkgd_aerosol != 0 forces a background aerosol (AOT550 = 0.01). */
int update_gridcell_atmos_coefs
(
  int irow,
  int icol,
  atmos_t *atmos_coef,
  Ar_gridcell_t *ar_gridcell,
  sixs_tables_t *sixs_tables,
  int **line_ar,
  Lut_t *lut,
  int nband,
  int bkgd_aerosol
)
{
  int ib,ipt,k;
  float mus,muv,phi,ratio_spres,tau_ray,aot550;
  double coef;
  float actual_rho_ray,actual_T_ray_up,actual_T_ray_down,actual_S_r;
  float rho_ray_P0,T_ray_up_P0,T_ray_down_P0,S_r_P0;
  /* band-center wavelengths (nm); index 5 unused (thermal) */
  float lamda[7]={486.,570.,660.,835.,1669.,0.,2207.};
  float tau_ray_sealevel[7]={0.16511,0.08614,0.04716,0.01835,0.00113,0.00037}; /* index=5 => band 7 */

  ipt=irow*ar_gridcell->nbcols+icol;      /* flat index of this grid cell */
  mus=cos(ar_gridcell->sun_zen[ipt]*RAD);
  muv=cos(ar_gridcell->view_zen[ipt]*RAD);
  phi=ar_gridcell->rel_az[ipt];
  ratio_spres=ar_gridcell->spres[ipt]/1013.;  /* surface/sea-level pressure */
  if (bkgd_aerosol) {
    atmos_coef->computed[ipt]=1;
    aot550=0.01;
  } else {
    if (line_ar[0][icol] != lut->aerosol_fill) {
      atmos_coef->computed[ipt]=1;
      /* scale the retrieved coarse AOT (stored x1000) to 550 nm using an
         Angstrom exponent of 1 from band 1's wavelength */
      aot550=((float)line_ar[0][icol]/1000.)*pow((550./lamda[0]),-1.);
    } else {
      /* no retrieval for this cell: fall back to background aerosol
         (computed is still set to 1 -- presumably deliberate so downstream
         code treats the cell as usable; confirm) */
      atmos_coef->computed[ipt]=1;
      aot550=0.01;
    }
  }
  /* bracket aot550 in the 6S table and build the interpolation weight */
  for (k=1;k<SIXS_NB_AOT;k++) {
    if (aot550 < sixs_tables->aot[k])
      break;
  }
  k--;
  if (k>=(SIXS_NB_AOT-1))
    k=SIXS_NB_AOT-2;
  coef=(aot550-sixs_tables->aot[k])/(sixs_tables->aot[k+1]-
    sixs_tables->aot[k]);
  for (ib=0;ib < nband; ib++) {
    /* gaseous transmittances and AOT-interpolated Rayleigh+aerosol terms */
    atmos_coef->tgOG[ib][ipt]=sixs_tables->T_g_og[ib];
    atmos_coef->tgH2O[ib][ipt]=sixs_tables->T_g_wv[ib];
    atmos_coef->td_ra[ib][ipt]=(1.-coef)*sixs_tables->T_ra_down[ib][k]+
      coef*sixs_tables->T_ra_down[ib][k+1];
    atmos_coef->tu_ra[ib][ipt]=(1.-coef)*sixs_tables->T_ra_up[ib][k]+
      coef*sixs_tables->T_ra_up[ib][k+1];
    atmos_coef->rho_mol[ib][ipt]=sixs_tables->rho_r[ib];
    atmos_coef->rho_ra[ib][ipt]=(1.-coef)*sixs_tables->rho_ra[ib][k]+
      coef*sixs_tables->rho_ra[ib][k+1];
    atmos_coef->td_da[ib][ipt]=(1.-coef)*sixs_tables->T_a_down[ib][k]+
      coef*sixs_tables->T_a_down[ib][k+1];
    atmos_coef->tu_da[ib][ipt]=(1.-coef)*sixs_tables->T_a_up[ib][k]+
      coef*sixs_tables->T_a_up[ib][k+1];
    atmos_coef->S_ra[ib][ipt]=(1.-coef)*sixs_tables->S_ra[ib][k]+
      coef*sixs_tables->S_ra[ib][k+1];
    /** compute DEM-based pressure correction for each grid point **/
    tau_ray=tau_ray_sealevel[ib]*ratio_spres;
    chand(&phi,&muv,&mus,&tau_ray,&actual_rho_ray);
    actual_T_ray_down=((2./3.+mus)+(2./3.-mus)*exp(-tau_ray/mus))/
      (4./3.+tau_ray); /* downward */
    actual_T_ray_up = ((2./3.+muv)+(2./3.-muv)*exp(-tau_ray/muv))/
      (4./3.+tau_ray); /* upward */
    csalbr(&tau_ray,&actual_S_r);
    /* sea-level (P0) Rayleigh quantities from the 6S tables */
    rho_ray_P0=sixs_tables->rho_r[ib];
    T_ray_down_P0=sixs_tables->T_r_down[ib];
    T_ray_up_P0=sixs_tables->T_r_up[ib];
    S_r_P0=sixs_tables->S_r[ib];
    /* replace the P0 Rayleigh contribution with the pressure-corrected one */
    atmos_coef->rho_ra[ib][ipt]=actual_rho_ray+(atmos_coef->rho_ra[ib][ipt]-
      rho_ray_P0); /* will need to correct for uwv/2 */
    atmos_coef->td_ra[ib][ipt] *= (actual_T_ray_down/T_ray_down_P0);
    atmos_coef->tu_ra[ib][ipt] *= (actual_T_ray_up/T_ray_up_P0);
    atmos_coef->S_ra[ib][ipt] = atmos_coef->S_ra[ib][ipt]-S_r_P0+actual_S_r;
    atmos_coef->td_r[ib][ipt] = actual_T_ray_down;
    atmos_coef->tu_r[ib][ipt] = actual_T_ray_up;
    atmos_coef->S_r[ib][ipt] = actual_S_r;
    atmos_coef->rho_r[ib][ipt] = actual_rho_ray;
  } /* for ib */
  return 0;
}

/* Solar zenith (*ts) and azimuth (*fs), both in degrees, for Julian day
   jday, decimal GMT hour gmt, and geographic position (flat,flon) in
   degrees.  Uses the classic Fourier-series time equation and solar
   declination (see 6S / Spencer 1971 formulations). */
void sun_angles
(
  short jday,
  float gmt,
  float flat,
  float flon,
  float *ts,
  float *fs
)
{
  double mst,tst,tet,et,ha,delta;
  double dlat,amuzero,elev,az,caz,azim;
  /* time-equation Fourier coefficients */
  double A1=.000075,A2=.001868,A3=.032077,A4=.014615,A5=.040849;
  /* solar-declination Fourier coefficients */
  double B1=.006918,B2=.399912,B3=.070257,B4=.006758;
  double B5=.000907,B6=.002697,B7=.001480;

  dlat=(double)flat*M_PI/180.;
  /*
    SOLAR POSITION (ZENITHAL ANGLE ThetaS,AZIMUTHAL ANGLE PhiS IN DEGREES)
    J IS THE DAY NUMBER IN THE YEAR

    MEAN SOLAR TIME (decimal hours)
  */
  mst=gmt+(flon)/15.;
  tet=2.*M_PI*(double)jday/365.;
  /* TIME EQUATION (in decimal minutes) */
  et=A1+A2*cos(tet)-A3*sin(tet)-A4*cos(2.*tet)-A5*sin(2.*tet);
  et=et*12.*60./M_PI;
  /* TRUE SOLAR TIME */
  tst=mst+et/60.;
  tst=(tst-12.);
  /* HOUR ANGLE */
  ha=tst*15.*M_PI/180.;
  /* SOLAR DECLINATION (IN RADIAN) */
  delta=B1-B2*cos(tet)+B3*sin(tet)-B4*cos(2.*tet)+B5*sin(2.*tet)-
    B6*cos(3.*tet)+B7*sin(3.*tet);
  /* ELEVATION,AZIMUTH */
  amuzero=sin(dlat)*sin(delta)+cos(dlat)*cos(delta)*cos(ha);
  elev=asin(amuzero);
  az=cos(delta)*sin(ha)/cos(elev);
  if (az<-1.)
    az=-1;
  else if (az>1.)
    az=1.;
  caz=(-cos(dlat)*sin(delta)+sin(dlat)*cos(delta)*cos(ha))/cos(elev);
  azim=asin(az);
  if (caz < 0.)
    azim=M_PI-azim;
  if ((caz > 0.) && (az < 0.))
    azim=2*M_PI+azim;
  azim=azim+M_PI;               /* measure azimuth from North */
  if (azim > (2.*M_PI))
    azim=azim-2.*M_PI;
  elev=elev*180./M_PI;
  /* CONVERSION IN DEGREES */
  *ts=90.-elev;                 /* zenith = 90 - elevation */
  *fs=azim*180./M_PI;
  return;
}
/* ======================= file: image-view.c ======================= */
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                 IIIII  M   M   AAA    GGGG  EEEEE                           %
%                   I    MM MM  A   A  G      E                               %
%                   I    M M M  AAAAA  G  GG  EEE                             %
%                   I    M   M  A   A  G   G  E                               %
%                 IIIII  M   M  A   A   GGGG  EEEEE                           %
%                                                                             %
%                      V   V  IIIII  EEEEE  W   W                             %
%                      V   V    I    E      W   W                             %
%                      V   V    I    EEE    W W W                             %
%                       V V     I    E      WW WW                             %
%                        V    IIIII  EEEEE  W   W                             %
%                                                                             %
%                                                                             %
%                       MagickCore Image View Methods                         %
%                                                                             %
%                              Software Design                                %
%                                John Cristy                                  %
%                                 March 2003                                  %
%                                                                             %
%                                                                             %
%  Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    http://www.imagemagick.org/script/license.php                            %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/
#include "magick/studio.h"
#include "magick/MagickCore.h"
#include "magick/exception-private.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"

/*
  Typedef declarations.

  An ImageView binds an image, a rectangular region of it, and a pixel
  cache view together so the iterator methods below can walk the region
  scanline-by-scanline (optionally in parallel via OpenMP).
*/
struct _ImageView
{
  char
    *description;       /* label passed to SetImageProgress(); may be NULL */

  RectangleInfo
    extent;             /* region of the image covered by this view */

  Image
    *image;             /* the viewed image (not destroyed with the view) */

  CacheView
    *view;              /* pixel cache view used for all pixel access */

  size_t
    number_threads;     /* cap on OpenMP threads for the iterator methods */

  ExceptionInfo
    *exception;         /* per-view exception sink (owned by the view) */

  MagickBooleanType
    debug;

  size_t
    signature;          /* MagickSignature while the structure is valid */
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e I m a g e V i e w                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageView() makes a copy of the specified image view.
%
%  The format of the CloneImageView method is:
%
%      ImageView *CloneImageView(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport ImageView *CloneImageView(const ImageView *image_view)
{
  ImageView
    *clone_view;

  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickSignature);
  clone_view=(ImageView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (ImageView *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  /* guard against a NULL description: DestroyImageView() shows it is an
     optional field */
  if (image_view->description != (char *) NULL)
    clone_view->description=ConstantString(image_view->description);
  clone_view->extent=image_view->extent;
  /*
    FIX: the image pointer was never copied, leaving clone_view->image NULL
    (from ResetMagickMemory) so GetImageViewImage() returned NULL and the
    iterator methods dereferenced a NULL image.  The view holds a borrowed
    reference (DestroyImageView() does not destroy the image), so sharing
    the pointer matches the ownership model.
  */
  clone_view->image=image_view->image;
  clone_view->view=CloneCacheView(image_view->view);
  clone_view->number_threads=image_view->number_threads;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,image_view->exception);
  clone_view->debug=image_view->debug;
  clone_view->signature=MagickSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e V i e w                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageView() deallocates memory associated with a image view.
%
%  The format of the DestroyImageView method is:
%
%      ImageView *DestroyImageView(ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
/* Frees the view's owned resources (description string, cache view,
   exception) and the view itself; the underlying image is NOT destroyed.
   Always returns NULL so callers can write: view=DestroyImageView(view); */
MagickExport ImageView *DestroyImageView(ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickSignature);
  if (image_view->description != (char *) NULL)
    image_view->description=DestroyString(image_view->description);
  image_view->view=DestroyCacheView(image_view->view);
  image_view->exception=DestroyExceptionInfo(image_view->exception);
  /* invalidate the signature so stale pointers trip the asserts above */
  image_view->signature=(~MagickSignature);
  image_view=(ImageView *) RelinquishMagickMemory(image_view);
  return(image_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D u p l e x T r a n s f e r I m a g e V i e w I t e r a t o r             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DuplexTransferImageViewIterator() iterates over three image views in
%  parallel and calls your transfer method for each scanline of the view.  The
%  source and duplex pixel extent is not confined to the image canvas-- that is
%  you can include negative offsets or widths or heights that exceed the image
%  dimension.  However, the destination image view is confined to the image
%  canvas-- that is no negative offsets or widths or heights that exceed the
%  image dimension are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType DuplexTransferImageViewMethod(const ImageView *source,
%        const ImageView *duplex,ImageView *destination,const ssize_t y,
%        const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the DuplexTransferImageViewIterator method is:
%
%      MagickBooleanType DuplexTransferImageViewIterator(ImageView *source,
%        ImageView *duplex,ImageView *destination,
%        DuplexTransferImageViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source image view.
% % o duplex: the duplex image view. % % o destination: the destination image view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ MagickExport MagickBooleanType DuplexTransferImageViewIterator( ImageView *source,ImageView *duplex,ImageView *destination, DuplexTransferImageViewMethod transfer,void *context) { ExceptionInfo *exception; Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (ImageView *) NULL); assert(source->signature == MagickSignature); if (transfer == (DuplexTransferImageViewMethod) NULL) return(MagickFalse); source_image=source->image; destination_image=destination->image; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(source->number_threads) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const PixelPacket *restrict duplex_pixels, *restrict pixels; register PixelPacket *restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y, duplex->extent.width,1,duplex->exception); if (duplex_pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } if (transfer(source,duplex,destination,y,id,context) == MagickFalse) status=MagickFalse; 
sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_DuplexTransferImageViewIterator) #endif proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w A u t h e n t i c I n d e x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewAuthenticIndexes() returns the image view authentic indexes. % % The format of the GetImageViewAuthenticPixels method is: % % IndexPacket *GetImageViewAuthenticIndexes(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport IndexPacket *GetImageViewAuthenticIndexes( const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickSignature); return(GetCacheViewAuthenticIndexQueue(image_view->view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewAuthenticPixels() returns the image view authentic pixels. % % The format of the GetImageViewAuthenticPixels method is: % % PixelPacket *GetImageViewAuthenticPixels(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. 
%
*/
MagickExport PixelPacket *GetImageViewAuthenticPixels(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickSignature);
  return(GetCacheViewAuthenticPixelQueue(image_view->view));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i e w E x c e p t i o n                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewException() returns the severity, reason, and description of any
%  error that occurs when utilizing a image view.
%
%  The format of the GetImageViewException method is:
%
%      char *GetImageViewException(const PixelImage *image_view,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o image_view: the pixel image_view.
%
%    o severity: the severity of the error is returned here.
%
*/
/* Caller owns the returned string and must free it (e.g. DestroyString). */
MagickExport char *GetImageViewException(const ImageView *image_view,
  ExceptionType *severity)
{
  char
    *description;

  assert(image_view != (const ImageView *) NULL);
  assert(image_view->signature == MagickSignature);
  assert(severity != (ExceptionType *) NULL);
  *severity=image_view->exception->severity;
  description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /* empty string when no reason has been recorded */
  *description='\0';
  if (image_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      image_view->exception->severity,image_view->exception->reason),
      MaxTextExtent);
  if (image_view->exception->description != (char *) NULL)
    {
      /* append the localized detail in parentheses: "reason (description)" */
      (void) ConcatenateMagickString(description," (",MaxTextExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        image_view->exception->severity,image_view->exception->description),
        MaxTextExtent);
      (void) ConcatenateMagickString(description,")",MaxTextExtent);
    }
  return(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i e w E x t e n t                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewExtent() returns the image view extent.
%
%  The format of the GetImageViewExtent method is:
%
%      RectangleInfo GetImageViewExtent(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport RectangleInfo GetImageViewExtent(const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickSignature);
  return(image_view->extent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i e w I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewImage() returns the image associated with the image view.
%
%  The format of the GetImageViewImage method is:
%
%      MagickCore *GetImageViewImage(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport Image *GetImageViewImage(const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickSignature);
  return(image_view->image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i e w I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewIterator() iterates over the image view in parallel and calls
%  your get method for each scanline of the view.  The pixel extent is
%  not confined to the image canvas-- that is you can include negative offsets
%  or widths or heights that exceed the image dimension.  Any updates to
%  the pixels in your callback are ignored.
%
%  The callback signature is:
%
%      MagickBooleanType GetImageViewMethod(const ImageView *source,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback get method that must be
%  executed by a single thread at a time.
%
%  The format of the GetImageViewIterator method is:
%
%      MagickBooleanType GetImageViewIterator(ImageView *source,
%        GetImageViewMethod get,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source image view.
%
%    o get: the get callback method.
%
%    o context: the user defined context.
%
*/
MagickExport MagickBooleanType GetImageViewIterator(ImageView *source,
  GetImageViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickSignature);
  if (get == (GetImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(source->number_threads)
#endif
  /* NOTE(review): the upper bound is extent.height alone, not
     extent.y+extent.height; for views with a non-zero y offset this iterates
     fewer rows than the extent implies.  All iterators in this file share
     this convention -- confirm intent before changing. */
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const PixelPacket
      *pixels;

    /* A failure on any row cancels the remaining iterations; OpenMP forbids
       break out of a parallel for, hence continue. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* The callback reads the row via the view; pixels above only validates
       that the row is accessible. */
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize the progress callback: progress++ must not race. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GetImageViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i e w V i r t u a l I n d e x e s                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewVirtualIndexes() returns the image view virtual indexes.
%
%  The format of the GetImageViewVirtualIndexes method is:
%
%      const IndexPacket *GetImageViewVirtualIndexes(
%        const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport const IndexPacket *GetImageViewVirtualIndexes(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickSignature);
  /* Read-only view into the cache's virtual index queue. */
  return(GetCacheViewVirtualIndexQueue(image_view->view));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i e w V i r t u a l P i x e l s                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewVirtualPixels() returns the image view virtual pixels.
%
%  The format of the GetImageViewVirtualPixels method is:
%
%      const PixelPacket *GetImageViewVirtualPixels(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport const PixelPacket *GetImageViewVirtualPixels(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickSignature);
  /* Read-only view into the cache's virtual pixel queue. */
  return(GetCacheViewVirtualPixelQueue(image_view->view));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e V i e w                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageView() returns MagickTrue if the parameter is verified as an image
%  view object.
% % The format of the IsImageView method is: % % MagickBooleanType IsImageView(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport MagickBooleanType IsImageView(const ImageView *image_view) { if (image_view == (const ImageView *) NULL) return(MagickFalse); if (image_view->signature != MagickSignature) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w I m a g e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewImageView() returns a image view required for all other methods in the % Image View API. % % The format of the NewImageView method is: % % ImageView *NewImageView(MagickCore *wand) % % A description of each parameter follows: % % o wand: the wand. % */ MagickExport ImageView *NewImageView(Image *image) { ImageView *image_view; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); image_view=(ImageView *) AcquireMagickMemory(sizeof(*image_view)); if (image_view == (ImageView *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(image_view,0,sizeof(*image_view)); image_view->description=ConstantString("ImageView"); image_view->image=image; image_view->view=AcquireCacheView(image_view->image); image_view->extent.width=image->columns; image_view->extent.height=image->rows; image_view->extent.x=0; image_view->extent.y=0; image_view->number_threads=GetOpenMPMaximumThreads(); image_view->exception=AcquireExceptionInfo(); image_view->debug=IsEventLogging(); image_view->signature=MagickSignature; return(image_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w I m a g e V i e w R e g i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewImageViewRegion() 
returns a image view required for all other methods % in the Image View API. % % The format of the NewImageViewRegion method is: % % ImageView *NewImageViewRegion(MagickCore *wand,const ssize_t x, % const ssize_t y,const size_t width,const size_t height) % % A description of each parameter follows: % % o wand: the magick wand. % % o x,y,columns,rows: These values define the perimeter of a extent of % pixel_wands view. % */ MagickExport ImageView *NewImageViewRegion(Image *image,const ssize_t x, const ssize_t y,const size_t width,const size_t height) { ImageView *image_view; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); image_view=(ImageView *) AcquireMagickMemory(sizeof(*image_view)); if (image_view == (ImageView *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(image_view,0,sizeof(*image_view)); image_view->description=ConstantString("ImageView"); image_view->view=AcquireCacheView(image_view->image); image_view->image=image; image_view->extent.width=width; image_view->extent.height=height; image_view->extent.x=x; image_view->extent.y=y; image_view->number_threads=GetOpenMPMaximumThreads(); image_view->exception=AcquireExceptionInfo(); image_view->debug=IsEventLogging(); image_view->signature=MagickSignature; return(image_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i e w D e s c r i p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageViewDescription() associates a description with an image view. % % The format of the SetImageViewDescription method is: % % void SetImageViewDescription(ImageView *image_view, % const char *description) % % A description of each parameter follows: % % o image_view: the image view. % % o description: the image view description. 
%
*/
MagickExport void SetImageViewDescription(ImageView *image_view,
  const char *description)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickSignature);
  /* NOTE(review): the previous description (allocated via ConstantString()
     in NewImageView*) is overwritten without being destroyed -- looks like a
     small leak on repeated calls; confirm against DestroyImageView(). */
  image_view->description=ConstantString(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e V i e w I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageViewIterator() iterates over the image view in parallel and calls
%  your set method for each scanline of the view.  The pixel extent is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.  The pixels are initially
%  undefined and any settings you make in the callback method are automagically
%  synced back to your image.
%
%  The callback signature is:
%
%      MagickBooleanType SetImageViewMethod(ImageView *destination,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetImageViewIterator method is:
%
%      MagickBooleanType SetImageViewIterator(ImageView *destination,
%        SetImageViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the image view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
MagickExport MagickBooleanType SetImageViewIterator(ImageView *destination,
  SetImageViewMethod set,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(destination != (ImageView *) NULL);
  assert(destination->signature == MagickSignature);
  if (set == (SetImageViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->image;
  /* Writing pixels requires DirectClass storage. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(destination->number_threads)
#endif
  /* NOTE(review): the upper bound ignores extent.y (same convention as the
     other iterators in this file). */
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register PixelPacket
      *restrict pixels;

    /* Cannot break out of a parallel for; skip remaining rows on failure. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Push the callback's row edits back to the pixel cache. */
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize the progress callback: progress++ must not race. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SetImageViewIterator)
#endif
        proceed=SetImageProgress(destination_image,destination->description,
          progress++,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e V i e w T h r e a d s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageViewThreads() sets the number of threads in a thread team.
%
%  The format of the SetImageViewThreads method is:
%
%      void SetImageViewThreads(ImageView *image_view,
%        const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
%    o number_threads: the number of threads in a thread team.
%
*/
MagickExport void SetImageViewThreads(ImageView *image_view,
  const size_t number_threads)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickSignature);
  /* Clamp the request to the OpenMP maximum. */
  image_view->number_threads=number_threads;
  if (number_threads > GetOpenMPMaximumThreads())
    image_view->number_threads=GetOpenMPMaximumThreads();
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f e r I m a g e V i e w I t e r a t o r                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferImageViewIterator() iterates over two image views in parallel and
%  calls your transfer method for each scanline of the view.  The source pixel
%  extent is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination image view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image dimension
%  are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType TransferImageViewMethod(const ImageView *source,
%        ImageView *destination,const ssize_t y,const int thread_id,
%        void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
% % The format of the TransferImageViewIterator method is: % % MagickBooleanType TransferImageViewIterator(ImageView *source, % ImageView *destination,TransferImageViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source image view. % % o destination: the destination image view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ MagickExport MagickBooleanType TransferImageViewIterator(ImageView *source, ImageView *destination,TransferImageViewMethod transfer,void *context) { ExceptionInfo *exception; Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (ImageView *) NULL); assert(source->signature == MagickSignature); if (transfer == (TransferImageViewMethod) NULL) return(MagickFalse); source_image=source->image; destination_image=destination->image; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(source->number_threads) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const PixelPacket *restrict pixels; register PixelPacket *restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } if (transfer(source,destination,y,id,context) == MagickFalse) status=MagickFalse; 
sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransferImageViewIterator) #endif proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U p d a t e I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UpdateImageViewIterator() iterates over the image view in parallel and calls % your update method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension are permitted. Updates to pixels % in your callback are automagically synced back to the image. % % The callback signature is: % % MagickBooleanType UpdateImageViewMethod(ImageView *source, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback update method that must be % executed by a single thread at a time. % % The format of the UpdateImageViewIterator method is: % % MagickBooleanType UpdateImageViewIterator(ImageView *source, % UpdateImageViewMethod update,void *context) % % A description of each parameter follows: % % o source: the source image view. % % o update: the update callback method. % % o context: the user defined context. 
%
*/
MagickExport MagickBooleanType UpdateImageViewIterator(ImageView *source,
  UpdateImageViewMethod update,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickSignature);
  if (update == (UpdateImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  /* In-place updates require DirectClass storage. */
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(source->number_threads)
#endif
  /* NOTE(review): the upper bound ignores extent.y (same convention as the
     other iterators in this file). */
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register PixelPacket
      *restrict pixels;

    /* Cannot break out of a parallel for; skip remaining rows on failure. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
        continue;
      }
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Push the callback's row edits back to the pixel cache. */
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize the progress callback: progress++ must not race. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_UpdateImageViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
serialized.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
// OMPT lit test: a serialized (if(0)) task inside a parallel region.
// Verifies task_create/task_schedule events and the exit/reenter frame
// addresses reported at each task level.  The CHECK lines below are
// FileCheck directives and must not be edited casually.
#define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN
#include "callback.h"
#include <omp.h>
#include <math.h>

int main() {
  omp_set_nested(0);
  print_frame(0);
#pragma omp parallel num_threads(2)
  {
    print_frame_from_outlined_fn(1);
    print_ids(0);
    print_ids(1);
    print_frame(0);
#pragma omp master
    {
      print_ids(0);
      // t is computed at runtime so the compiler cannot fold the if clause;
      // (int)sin(0.1) evaluates to 0, so the task is undeferred (serialized).
      int t = (int)sin(0.1);
#pragma omp task if(t)
      {
        print_frame(1);
        print_ids(0);
        print_ids(1);
        print_ids(2);
      }
      print_fuzzy_address(1);
      print_ids(0);
    }
    print_ids(0);
  }

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_schedule'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'

  // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]

  // make sure initial data pointers are null
  // CHECK-NOT: 0: new_task_data initially not null

  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[NULL]], new_task_id={{[0-9]+}}, codeptr_ra=[[NULL]], task_type=ompt_task_initial=1, has_dependences=no
  // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[MAIN_REENTER:0x[0-f]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[MAIN_REENTER]], parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=2, codeptr_ra=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]

  // nested parallel masters
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]]
  // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit=[[EXIT]], parent_task_frame.reenter=[[REENTER]], new_task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // <- ompt_event_task_schedule ([[IMPLICIT_TASK_ID]], [[TASK_ID]]) would be expected here
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: first_task_id=[[IMPLICIT_TASK_ID]], second_task_id=[[TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(1)=[[TASK_EXIT:0x[0-f]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]], exit_frame=[[TASK_EXIT]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[REENTER]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]]
  // <- ompt_event_task_schedule ([[TASK_ID]], [[IMPLICIT_TASK_ID]]) would be expected here
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: first_task_id=[[TASK_ID]], second_task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reen

  // implicit barrier parallel
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]

  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]]
  // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]

  return 0;
}
util.c
#include "compiler.h"
#include "util.h"

/* Aligned allocator: returns `bytes` of memory aligned to `alignment`
   (project constant from compiler.h/util.h).  Aborts on failure, so the
   return value is never NULL. */
void * mymalloc(size_t bytes)
{
    void * ptr = NULL;
    int rc = posix_memalign(&ptr, alignment, bytes);
    if (rc != 0 || ptr == NULL) abort();
    return ptr;
}

/* Count elementwise mismatches between x and y over n elements.
   Exact bitwise comparison -- no epsilon -- which is intended here since
   the arrays are expected to be byte-identical copies. */
size_t compare_doubles(size_t n, const double * RESTRICT x, const double * RESTRICT y)
{
    size_t errors = 0;
    #pragma omp parallel for reduction(+:errors)
    for (size_t i=0; i<n; i++) {
        if (x[i] != y[i]) errors++;
    }
    return errors;
}

/* Same as compare_doubles but only examines every `stride`-th element. */
size_t compare_doubles_stride(size_t n, const double * RESTRICT x, const double * RESTRICT y, int stride)
{
    size_t errors = 0;
    #pragma omp parallel for reduction(+:errors)
    for (size_t i=0; i<n; i+=stride) {
        if (x[i] != y[i]) errors++;
    }
    return errors;
}

/* Verify a strided copy: y[i] must equal x[i] at each stride point, and the
   elements BETWEEN stride points must still hold the fill value `val`
   (i.e. the copy must not have touched them). */
size_t compare_doubles_stride_holes(size_t n, const double * RESTRICT x, const double * RESTRICT y, int stride, double val)
{
    size_t errors = 0;
    #pragma omp parallel for reduction(+:errors)
    for (size_t i=0; i<n; i+=stride) {
        /* check the part that is copied */
        if (y[i] != x[i]) errors++;
        /* between the strides, elements should not change */
        for (int s=1; s<stride && i+s<n; s++) {
            if (y[i+s] != val) errors++;
        }
    }
    return errors;
}

/* Fill x[i] = (double)i for i in [0, n). */
void init_doubles(size_t n, double * RESTRICT x)
{
    #pragma omp parallel for
    for (size_t i=0; i<n; i++) {
        x[i] = (double)i;
    }
}

/* Fill all n elements of x with `value`. */
void set_doubles(size_t n, double value, double * RESTRICT x)
{
    #pragma omp parallel for
    for (size_t i=0; i<n; i++) {
        x[i] = value;
    }
}

/* Debug dump: one array, "index value" per line. */
void print_doubles_1(size_t n, const double * RESTRICT x)
{
    for (size_t i=0; i<n; i++) {
        printf("%zu %lf\n", i, x[i]);
    }
    fflush(stdout);
}

/* Debug dump: two arrays side by side, "index x y" per line. */
void print_doubles_2(size_t n, const double * RESTRICT x, const double * RESTRICT y)
{
    for (size_t i=0; i<n; i++) {
        printf("%zu %lf %lf\n", i, x[i], y[i]);
    }
    fflush(stdout);
}

/* Debug form of compare_doubles_stride_holes: prints every comparison and
   tags mismatches with "ERROR" instead of counting them. */
void print_compare_doubles_stride_holes(size_t n, const double * RESTRICT x, const double * RESTRICT y, int stride, double val)
{
    for (size_t i=0; i<n; i+=stride) {
        printf("%zu %lf %lf %s\n", i, y[i], x[i], (y[i]==x[i]) ? "" : "ERROR");
        for (int s=1; s<stride && i+s<n; s++) {
            printf("%zu %lf %lf %s\n", i+s, y[i+s], val, (y[i+s]==val) ? "" : "ERROR");
        }
    }
    fflush(stdout);
}
DataGen.h
// Copyright (C) 2019-2020 Zilliz. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software distributed under the License // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express // or implied. See the License for the specific language governing permissions and limitations under the License #pragma once #include <boost/algorithm/string/predicate.hpp> #include <cstring> #include <memory> #include <random> #include "Constants.h" #include "common/Schema.h" #include "knowhere/index/vector_index/VecIndex.h" #include "knowhere/index/vector_index/adapter/VectorAdapter.h" #include "knowhere/index/vector_index/VecIndexFactory.h" #include "knowhere/index/vector_index/IndexIVF.h" #include "query/SearchOnIndex.h" #include "segcore/SegmentGrowingImpl.h" #include "segcore/SegmentSealedImpl.h" using boost::algorithm::starts_with; namespace milvus::segcore { struct GeneratedData { std::vector<uint8_t> rows_; std::vector<aligned_vector<uint8_t>> cols_; std::vector<idx_t> row_ids_; std::vector<Timestamp> timestamps_; RowBasedRawData raw_; template <typename T> auto get_col(int index) const { auto& target = cols_.at(index); std::vector<T> ret(target.size() / sizeof(T)); memcpy(ret.data(), target.data(), target.size()); return ret; } template <typename T> auto get_mutable_col(int index) { auto& target = cols_.at(index); assert(target.size() == row_ids_.size() * sizeof(T)); auto ptr = reinterpret_cast<T*>(target.data()); return ptr; } private: GeneratedData() = default; friend GeneratedData DataGen(SchemaPtr schema, int64_t N, uint64_t seed, uint64_t ts_offset); void generate_rows(int64_t N, SchemaPtr schema); }; inline void GeneratedData::generate_rows(int64_t N, SchemaPtr schema) { 
std::vector<int> offset_infos(schema->size() + 1, 0); auto sizeof_infos = schema->get_sizeof_infos(); std::partial_sum(sizeof_infos.begin(), sizeof_infos.end(), offset_infos.begin() + 1); int64_t len_per_row = offset_infos.back(); assert(len_per_row == schema->get_total_sizeof()); // change column-based data to row-based data std::vector<uint8_t> result(len_per_row * N); for (int index = 0; index < N; ++index) { for (int fid = 0; fid < schema->size(); ++fid) { auto len = sizeof_infos[fid]; auto offset = offset_infos[fid]; auto src = cols_[fid].data() + index * len; auto dst = result.data() + index * len_per_row + offset; memcpy(dst, src, len); } } rows_ = std::move(result); raw_.raw_data = rows_.data(); raw_.sizeof_per_row = schema->get_total_sizeof(); raw_.count = N; } inline GeneratedData DataGen(SchemaPtr schema, int64_t N, uint64_t seed = 42, uint64_t ts_offset = 0) { using std::vector; std::vector<aligned_vector<uint8_t>> cols; std::default_random_engine er(seed); std::normal_distribution<> distr(0, 1); int offset = 0; auto insert_cols = [&cols](auto& data) { using T = std::remove_reference_t<decltype(data)>; auto len = sizeof(typename T::value_type) * data.size(); auto ptr = aligned_vector<uint8_t>(len); memcpy(ptr.data(), data.data(), len); cols.emplace_back(std::move(ptr)); }; for (auto& field : schema->get_fields()) { switch (field.get_data_type()) { case engine::DataType::VECTOR_FLOAT: { auto dim = field.get_dim(); vector<float> final(dim * N); bool is_ip = starts_with(field.get_name().get(), "normalized"); #pragma omp parallel for for (int n = 0; n < N; ++n) { vector<float> data(dim); float sum = 0; std::default_random_engine er2(seed + n); std::normal_distribution<> distr2(0, 1); for (auto& x : data) { x = distr2(er2) + offset; sum += x * x; } if (is_ip) { sum = sqrt(sum); for (auto& x : data) { x /= sum; } } std::copy(data.begin(), data.end(), final.begin() + dim * n); } insert_cols(final); break; } case engine::DataType::VECTOR_BINARY: { auto dim = 
field.get_dim(); Assert(dim % 8 == 0); vector<uint8_t> data(dim / 8 * N); for (auto& x : data) { x = er(); } insert_cols(data); break; } case engine::DataType::INT64: { vector<int64_t> data(N); // begin with counter if (starts_with(field.get_name().get(), "counter")) { int64_t index = 0; for (auto& x : data) { x = index++; } } else { int i = 0; for (auto& x : data) { x = er() % (2 * N); x = i; i++; } } insert_cols(data); break; } case engine::DataType::INT32: { vector<int> data(N); for (auto& x : data) { x = er() % (2 * N); } insert_cols(data); break; } case engine::DataType::FLOAT: { vector<float> data(N); for (auto& x : data) { x = distr(er); } insert_cols(data); break; } case engine::DataType::DOUBLE: { vector<double> data(N); for (auto& x : data) { x = distr(er); } insert_cols(data); break; } default: { throw std::runtime_error("unimplemented"); } } ++offset; } GeneratedData res; res.cols_ = std::move(cols); for (int i = 0; i < N; ++i) { res.row_ids_.push_back(i); res.timestamps_.push_back(i + ts_offset); } // std::shuffle(res.row_ids_.begin(), res.row_ids_.end(), er); res.generate_rows(N, schema); return std::move(res); } inline auto CreatePlaceholderGroup(int64_t num_queries, int dim, int64_t seed = 42) { namespace ser = milvus::proto::milvus; ser::PlaceholderGroup raw_group; auto value = raw_group.add_placeholders(); value->set_tag("$0"); value->set_type(ser::PlaceholderType::FloatVector); std::normal_distribution<double> dis(0, 1); std::default_random_engine e(seed); for (int i = 0; i < num_queries; ++i) { std::vector<float> vec; for (int d = 0; d < dim; ++d) { vec.push_back(dis(e)); } // std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float)); value->add_values(vec.data(), vec.size() * sizeof(float)); } return raw_group; } inline auto CreatePlaceholderGroupFromBlob(int64_t num_queries, int dim, const float* src) { namespace ser = milvus::proto::milvus; ser::PlaceholderGroup raw_group; auto value = raw_group.add_placeholders(); 
value->set_tag("$0"); value->set_type(ser::PlaceholderType::FloatVector); int64_t src_index = 0; for (int i = 0; i < num_queries; ++i) { std::vector<float> vec; for (int d = 0; d < dim; ++d) { vec.push_back(src[src_index++]); } // std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float)); value->add_values(vec.data(), vec.size() * sizeof(float)); } return raw_group; } inline auto CreateBinaryPlaceholderGroup(int64_t num_queries, int64_t dim, int64_t seed = 42) { assert(dim % 8 == 0); namespace ser = milvus::proto::milvus; ser::PlaceholderGroup raw_group; auto value = raw_group.add_placeholders(); value->set_tag("$0"); value->set_type(ser::PlaceholderType::BinaryVector); std::default_random_engine e(seed); for (int i = 0; i < num_queries; ++i) { std::vector<uint8_t> vec; for (int d = 0; d < dim / 8; ++d) { vec.push_back(e()); } // std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float)); value->add_values(vec.data(), vec.size()); } return raw_group; } inline auto CreateBinaryPlaceholderGroupFromBlob(int64_t num_queries, int64_t dim, const uint8_t* ptr) { assert(dim % 8 == 0); namespace ser = milvus::proto::milvus; ser::PlaceholderGroup raw_group; auto value = raw_group.add_placeholders(); value->set_tag("$0"); value->set_type(ser::PlaceholderType::BinaryVector); for (int i = 0; i < num_queries; ++i) { std::vector<uint8_t> vec; for (int d = 0; d < dim / 8; ++d) { vec.push_back(*ptr); ++ptr; } // std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float)); value->add_values(vec.data(), vec.size()); } return raw_group; } inline json SearchResultToJson(const SearchResult& sr) { int64_t num_queries = sr.num_queries_; int64_t topk = sr.topk_; std::vector<std::vector<std::string>> results; for (int q = 0; q < num_queries; ++q) { std::vector<std::string> result; for (int k = 0; k < topk; ++k) { int index = q * topk + k; result.emplace_back(std::to_string(sr.ids_[index]) + "->" + 
std::to_string(sr.distances_[index])); } results.emplace_back(std::move(result)); } return json{results}; }; inline void SealedLoader(const GeneratedData& dataset, SegmentSealed& seg) { // TODO auto row_count = dataset.row_ids_.size(); { LoadFieldDataInfo info; info.blob = dataset.row_ids_.data(); info.row_count = dataset.row_ids_.size(); info.field_id = 0; // field id for RowId seg.LoadFieldData(info); } { LoadFieldDataInfo info; info.blob = dataset.timestamps_.data(); info.row_count = dataset.timestamps_.size(); info.field_id = 1; seg.LoadFieldData(info); } int field_offset = 0; for (auto& meta : seg.get_schema().get_fields()) { LoadFieldDataInfo info; info.field_id = meta.get_id().get(); info.row_count = row_count; info.blob = dataset.cols_[field_offset].data(); seg.LoadFieldData(info); ++field_offset; } } inline std::unique_ptr<SegmentSealed> SealedCreator(SchemaPtr schema, const GeneratedData& dataset, const LoadIndexInfo& index_info) { auto segment = CreateSealedSegment(schema); SealedLoader(dataset, *segment); segment->LoadIndex(index_info); return segment; } inline knowhere::VecIndexPtr GenIndexing(int64_t N, int64_t dim, const float* vec) { // {knowhere::IndexParams::nprobe, 10}, auto conf = knowhere::Config{{knowhere::meta::DIM, dim}, {knowhere::IndexParams::nlist, 1024}, {knowhere::Metric::TYPE, milvus::knowhere::Metric::L2}, {knowhere::meta::DEVICEID, 0}}; auto database = knowhere::GenDataset(N, dim, vec); auto indexing = std::make_shared<knowhere::IVF>(); indexing->Train(database, conf); indexing->AddWithoutIds(database, conf); return indexing; } } // namespace milvus::segcore
GB_unop__log2_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__log2_fc64_fc64
// op(A') function: GB_unop_tran__log2_fc64_fc64

// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_clog2 (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_clog2 (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = GB_clog2 (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG2 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies z = log2(a) elementwise over the anz entries, for both the full
// (Ab == NULL) and bitmap (Ab != NULL) representations of A.

GrB_Info GB_unop_apply__log2_fc64_fc64
(
    GxB_FC64_t *Cx,                 // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is full: every one of the anz entries is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_clog2 (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_clog2 (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body comes from the shared template GB_unop_transpose.c,
// specialized through the GB_* macros defined above.

GrB_Info GB_unop_tran__log2_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__fmod_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__fmod_fp32 // A.*B function (eWiseMult): GB_AemultB__fmod_fp32 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__fmod_fp32 // C+=b function (dense accum): GB_Cdense_accumb__fmod_fp32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__fmod_fp32 // C=scalar+B GB_bind1st__fmod_fp32 // C=scalar+B' GB_bind1st_tran__fmod_fp32 // C=A+scalar GB_bind2nd__fmod_fp32 // C=A'+scalar GB_bind2nd_tran__fmod_fp32 // C type: float // A type: float // B,b type: float // BinaryOp: cij = fmodf (aij, bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx 
[pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = fmodf (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FMOD || GxB_NO_FP32 || GxB_NO_FMOD_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__fmod_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__fmod_fp32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__fmod_fp32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, 
const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__fmod_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__fmod_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int 
ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__fmod_fp32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float bij = Bx [p] ; Cx [p] = fmodf (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__fmod_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; Cx [p] = fmodf (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = fmodf (x, aij) ; \ } GrB_Info 
GB_bind1st_tran__fmod_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = fmodf (aij, y) ; \ } GrB_Info GB_bind2nd_tran__fmod_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
colorspace.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/

#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/property.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/utility.h"

/*
  Typedef declarations.
*/
typedef struct _TransformPacket
{
  MagickRealType
    x,
    y,
    z;
} TransformPacket;

/*
  Forward declarations.
*/
static MagickBooleanType
  TransformsRGBImage(Image *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   s R G B T r a n s f o r m I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  sRGBTransformImage() converts the reference image from sRGB to an alternate
%  colorspace.  The transformation matrices are not the standard ones: the
%  weights are rescaled to normalize the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the sRGBTransformImage method is:
%
%      MagickBooleanType sRGBTransformImage(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
% % o exception: return any errors or warnings in this structure. % */ static inline void ConvertRGBToCMY(const double red,const double green, const double blue,double *cyan,double *magenta,double *yellow) { *cyan=QuantumScale*(QuantumRange-red); *magenta=QuantumScale*(QuantumRange-green); *yellow=QuantumScale*(QuantumRange-blue); } static inline void ConvertXYZToLMS(const double x,const double y, const double z,double *L,double *M,double *S) { *L=0.7328*x+0.4296*y-0.1624*z; *M=(-0.7036*x+1.6975*y+0.0061*z); *S=0.0030*x+0.0136*y+0.9834*z; } static void ConvertRGBToLMS(const double red,const double green, const double blue,double *L,double *M,double *S) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLMS(X,Y,Z,L,M,S); } static void ConvertRGBToLab(const double red,const double green, const double blue,double *L,double *a,double *b) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLab(X,Y,Z,L,a,b); } static void ConvertRGBToLuv(const double red,const double green, const double blue,double *L,double *u,double *v) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLuv(X,Y,Z,L,u,v); } static void ConvertRGBToxyY(const double red,const double green, const double blue,double *low_x,double *low_y,double *cap_Y) { double gamma, X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); gamma=PerceptibleReciprocal(X+Y+Z); *low_x=gamma*X; *low_y=gamma*Y; *cap_Y=Y; } static void ConvertRGBToYDbDr(const double red,const double green, const double blue,double *Y,double *Db,double *Dr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5; *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5; } static void ConvertRGBToYIQ(const double red,const double green, const double blue,double *Y,double *I,double *Q) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5; 
*Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5; } static void ConvertRGBToYPbPr(const double red,const double green, const double blue,double *Y,double *Pb,double *Pr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5; *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5; } static void ConvertRGBToYCbCr(const double red,const double green, const double blue,double *Y,double *Cb,double *Cr) { ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr); } static void ConvertRGBToYUV(const double red,const double green, const double blue,double *Y,double *U,double *V) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5; *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5; } static MagickBooleanType sRGBTransformImage(Image *image, const ColorspaceType colorspace,ExceptionInfo *exception) { #define sRGBTransformImageTag "RGBTransform/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo primary_info; register ssize_t i; ssize_t y; TransformPacket *x_map, *y_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(colorspace != sRGBColorspace); assert(colorspace != TransparentColorspace); assert(colorspace != UndefinedColorspace); status=MagickTrue; progress=0; switch (colorspace) { case CMYKColorspace: { PixelInfo zero; /* Convert RGB to CMYK colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertRGBToCMYK(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->type=image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType : ColorSeparationAlphaType; if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: { /* Transform image from sRGB to GRAY. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelGray(image,ClampToQuantum(GetPixelIntensity(image,q)),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from sRGB to target colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); switch (colorspace) { case CMYColorspace: { ConvertRGBToCMY(red,green,blue,&X,&Y,&Z); break; } case HCLColorspace: { ConvertRGBToHCL(red,green,blue,&X,&Y,&Z); break; } case HCLpColorspace: { ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z); break; } case HSBColorspace: { ConvertRGBToHSB(red,green,blue,&X,&Y,&Z); break; } case HSIColorspace: { ConvertRGBToHSI(red,green,blue,&X,&Y,&Z); break; } case HSLColorspace: { ConvertRGBToHSL(red,green,blue,&X,&Y,&Z); break; } case HSVColorspace: { ConvertRGBToHSV(red,green,blue,&X,&Y,&Z); break; } case HWBColorspace: { ConvertRGBToHWB(red,green,blue,&X,&Y,&Z); break; } case LabColorspace: { ConvertRGBToLab(red,green,blue,&X,&Y,&Z); break; } case LCHColorspace: case LCHabColorspace: { ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z); break; } case LCHuvColorspace: { ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z); break; } case LMSColorspace: { ConvertRGBToLMS(red,green,blue,&X,&Y,&Z); break; } case LuvColorspace: { ConvertRGBToLuv(red,green,blue,&X,&Y,&Z); break; } case xyYColorspace: { ConvertRGBToxyY(red,green,blue,&X,&Y,&Z); break; } case XYZColorspace: { 
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); break; } case YCbCrColorspace: { ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z); break; } case YDbDrColorspace: { ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z); break; } case YIQColorspace: { ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z); break; } case YPbPrColorspace: { ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z); break; } case YUVColorspace: { ConvertRGBToYUV(red,green,blue,&X,&Y,&Z); break; } default: { X=QuantumScale*red; Y=QuantumScale*green; Z=QuantumScale*blue; break; } } SetPixelRed(image,ClampToQuantum(QuantumRange*X),q); SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q); SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { #define DisplayGamma (1.0/1.7) #define FilmGamma 0.6 #define ReferenceBlack 95.0 #define ReferenceWhite 685.0 const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform RGB to Log colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+ log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/ film_gamma))/1024.0)); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) DecodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=(double) DecodePixelGamma((MagickRealType) 
GetPixelGreen(image,q)); blue=(double) DecodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q); SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))], q); SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform image from sRGB to linear RGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red; red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } 
image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) ResetMagickMemory(&primary_info,0,sizeof(primary_info)); switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B I and Q, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.33333*(double) i); y_map[i].x=(MagickRealType) (0.33334*(double) i); z_map[i].x=(MagickRealType) (0.33333*(double) i); x_map[i].y=(MagickRealType) (0.50000*(double) i); y_map[i].y=(MagickRealType) (0.00000*(double) i); z_map[i].y=(MagickRealType) (-0.50000*(double) i); x_map[i].z=(MagickRealType) (-0.25000*(double) i); y_map[i].z=(MagickRealType) (0.50000*(double) i); z_map[i].z=(MagickRealType) (-0.25000*(double) i); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.601): Y = 0.2988390*R+0.5868110*G+0.1143500*B Cb= -0.1687367*R-0.3312640*G+0.5000000*B Cr= 0.5000000*R-0.4186880*G-0.0813120*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); x_map[i].y=(MagickRealType) (-0.1687367*(double) i); y_map[i].y=(MagickRealType) (-0.331264*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].z=(MagickRealType) (-0.418688*(double) i); z_map[i].z=(MagickRealType) (-0.081312*(double) i); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.709): Y = 0.212656*R+0.715158*G+0.072186*B Cb= -0.114572*R-0.385428*G+0.500000*B Cr= 0.500000*R-0.454153*G-0.045847*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212656*(double) i); y_map[i].x=(MagickRealType) (0.715158*(double) i); z_map[i].x=(MagickRealType) (0.072186*(double) i); x_map[i].y=(MagickRealType) (-0.114572*(double) i); y_map[i].y=(MagickRealType) (-0.385428*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].z=(MagickRealType) (-0.454153*(double) i); z_map[i].z=(MagickRealType) (-0.045847*(double) i); } break; } case YCCColorspace: { /* Initialize YCC tables: Y = 0.298839*R+0.586811*G+0.114350*B C1= -0.298839*R-0.586811*G+0.88600*B C2= 0.70100*R-0.586811*G-0.114350*B YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. 
*/ primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156)); primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137)); for (i=0; i <= (ssize_t) (0.018*MaxMap); i++) { x_map[i].x=0.003962014134275617*i; y_map[i].x=0.007778268551236748*i; z_map[i].x=0.001510600706713781*i; x_map[i].y=(-0.002426619775463276)*i; y_map[i].y=(-0.004763965913702149)*i; z_map[i].y=0.007190585689165425*i; x_map[i].z=0.006927257754597858*i; y_map[i].z=(-0.005800713697502058)*i; z_map[i].z=(-0.0011265440570958)*i; } for ( ; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.2201118963486454*(1.099*i-0.099); y_map[i].x=0.4321260306242638*(1.099*i-0.099); z_map[i].x=0.08392226148409894*(1.099*i-0.099); x_map[i].y=(-0.1348122097479598)*(1.099*i-0.099); y_map[i].y=(-0.2646647729834528)*(1.099*i-0.099); z_map[i].y=0.3994769827314126*(1.099*i-0.099); x_map[i].z=0.3848476530332144*(1.099*i-0.099); y_map[i].z=(-0.3222618720834477)*(1.099*i-0.099); z_map[i].z=(-0.06258578094976668)*(1.099*i-0.099); } break; } default: { /* Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert from sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. 
*/ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register Quantum *magick_restrict q; register ssize_t x; register unsigned int blue, green, red; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelRed(image,q))); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelGreen(image,q))); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelBlue(image,q))); pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+ primary_info.x; pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+ primary_info.y; pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+ primary_info.z; SetPixelRed(image,ScaleMapToQuantum(pixel.red),q); SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q); SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_sRGBTransformImage) #endif proceed=SetImageProgress(image,sRGBTransformImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { register unsigned int blue, green, red; /* Convert PseudoClass image. 
*/
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        PixelInfo
          pixel;

        /*
          Push each colormap entry through the 3x3 transform tables; the
          primary_info offsets re-center signed channels (e.g. Cb/Cr).
        */
        red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
        green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
        blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
        pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
        pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
        pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
        image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red);
        image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green);
        image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue);
      }
      (void) SyncImage(image,exception);
      break;
    }
  }
  /*
    Relinquish resources.
  */
  z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
  y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
  x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
  if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
    return(MagickFalse);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C o l o r s p a c e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColorspace() sets the colorspace member of the Image structure.
%
%  The format of the SetImageColorspace method is:
%
%      MagickBooleanType SetImageColorspace(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  ImageType
    type;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* No-op when the image is already in the requested colorspace. */
  if (image->colorspace == colorspace)
    return(MagickTrue);
  image->colorspace=colorspace;
  /* Reset intent/gamma/chromaticity, then refine them per colorspace. */
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;
  (void) ResetMagickMemory(&image->chromaticity,0,sizeof(image->chromaticity));
  type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      /*
        Luminance-based intensity methods operate on linear values, so the
        gamma is unity for those; otherwise keep the default ~1/2.2.
      */
      if ((image->intensity == Rec601LuminancePixelIntensityMethod) ||
          (image->intensity == Rec709LuminancePixelIntensityMethod))
        image->gamma=1.000;
      type=GrayscaleType;
    }
  else
    if ((IsRGBColorspace(colorspace) != MagickFalse) ||
        (colorspace == XYZColorspace) || (colorspace == xyYColorspace))
      image->gamma=1.000;  /* linear colorspaces */
    else
      {
        /* sRGB-like colorspaces: perceptual intent + sRGB/D65 primaries. */
        image->rendering_intent=PerceptualIntent;
        image->chromaticity.red_primary.x=0.6400;
        image->chromaticity.red_primary.y=0.3300;
        image->chromaticity.red_primary.z=0.0300;
        image->chromaticity.green_primary.x=0.3000;
        image->chromaticity.green_primary.y=0.6000;
        image->chromaticity.green_primary.z=0.1000;
        image->chromaticity.blue_primary.x=0.1500;
        image->chromaticity.blue_primary.y=0.0600;
        image->chromaticity.blue_primary.z=0.7900;
        image->chromaticity.white_point.x=0.3127;
        image->chromaticity.white_point.y=0.3290;
        image->chromaticity.white_point.z=0.3583;
      }
  /* Sync the pixel cache before publishing the (possibly new) image type. */
  status=SyncImagePixelCache(image,exception);
  image->type=type;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e G r a y                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageGray() returns MagickTrue if all the pixels in the image have
the
%  same red, green, and blue intensities and changes the type of the image to
%  bi-level or grayscale.
%
%  The format of the SetImageGray method is:
%
%      MagickBooleanType SetImageGray(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  const char
    *value;

  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Already gray: nothing to do. */
  if (IsImageGray(image))
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /* Honor an explicit opt-out of automatic grayscale detection. */
  value=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(value) != MagickFalse)
    return(MagickFalse);
  /* Scan the pixels; UndefinedType means the image is not actually gray. */
  type=IdentifyImageGray(image,exception);
  if (type == UndefinedType)
    return(MagickFalse);
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=type;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e M o n o c h r o m e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageMonochrome() returns MagickTrue if all the pixels in the image have
%  the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange and changes the type of the image to bi-level.
%
%  The format of the SetImageMonochrome method is:
%
%      MagickBooleanType SetImageMonochrome(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  const char
    *value;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Already bi-level: nothing to do. */
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /* Honor an explicit opt-out of automatic grayscale detection. */
  value=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(value) != MagickFalse)
    return(MagickFalse);
  /* Scan the pixels; bail out unless every pixel is 0 or QuantumRange. */
  if (IdentifyImageMonochrome(image,exception) == MagickFalse)
    return(MagickFalse);
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=BilevelType;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f o r m I m a g e C o l o r s p a c e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImageColorspace() transforms an image colorspace, changing the
%  image data to reflect the new colorspace.
%
%  The format of the TransformImageColorspace method is:
%
%      MagickBooleanType TransformImageColorspace(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Fast paths: same colorspace, gray-with-gamma to sRGB, or Undefined
    target all reduce to just (re)setting the colorspace member.
  */
  if (image->colorspace == colorspace)
    return(SetImageColorspace(image,colorspace,exception));
  if ((image->colorspace == GRAYColorspace) && (image->gamma != 1.0) &&
      (colorspace == sRGBColorspace))
    return(SetImageColorspace(image,colorspace,exception));
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    Convert the reference image from an alternate colorspace to sRGB.
    Any embedded color profiles no longer describe the pixels, so drop them.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformsRGBImage(image,exception));
  status=MagickTrue;
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    status=TransformsRGBImage(image,exception);
  if (status == MagickFalse)
    return(status);
  /*
    Convert the reference image from sRGB to an alternate colorspace.
  */
  if (sRGBTransformImage(image,colorspace,exception) == MagickFalse)
    status=MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     T r a n s f o r m s R G B I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformsRGBImage() converts the reference image from an alternate
%  colorspace to sRGB.  The transformation matrices are not the standard ones:
%  the weights are rescaled to normalize the range of the transformed values
%  to be [0..QuantumRange].
%
%  The format of the TransformsRGBImage method is:
%
%      MagickBooleanType TransformsRGBImage(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Convert normalized CMY [0..1] to RGB scaled to [0..QuantumRange]. */
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,double *red,double *green,double *blue)
{
  *red=QuantumRange*(1.0-cyan);
  *green=QuantumRange*(1.0-magenta);
  *blue=QuantumRange*(1.0-yellow);
}

/* LMS cone response to CIE XYZ via a fixed 3x3 inverse transform. */
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
  *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
  *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
}

/* LMS to RGB, composed as LMS -> XYZ -> RGB. */
static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLMSToXYZ(L,M,S,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  CIE Luv to RGB: the inputs are normalized [0..1]; the scale/offset
  constants restore the native L*u*v* ranges before the XYZ conversion.
*/
static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/* Round to the nearest integer, clamped to the YCC table range [0..1388]. */
static inline ssize_t RoundToYCC(const double value)
{
  if (value <= 0.0)
    return(0);
  if (value >= 1388.0)
    return(1388);
  return((ssize_t) (value+0.5));
}

/*
  CIE Lab to RGB: inputs are normalized [0..1] with a/b centered at 0.5;
  denormalize to native L*a*b* ranges, then convert through XYZ.
*/
static inline void ConvertLabToRGB(const double L,const double a,
  const double b,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  CIE xyY to RGB.  PerceptibleReciprocal() guards the division when the
  y chromaticity is zero (or effectively zero).
*/
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
  const double cap_Y,double *red,double *green,double *blue)
{
  double
    gamma,
    X,
    Y,
    Z;

  gamma=PerceptibleReciprocal(low_y);
  X=gamma*cap_Y*low_x;
  Y=cap_Y;
  Z=gamma*cap_Y*(1.0-low_x-low_y);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  YPbPr to RGB; Pb/Pr are stored offset by 0.5 so they fit [0..1].
  The matrix is the (rescaled) inverse of the ConvertRGBToYPbPr weights.
*/
static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  double *red,double *green,double *blue)
{
  *red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*(Pb-0.5)+
    1.4019995886561440468*(Pr-0.5));
*green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*(Pb-0.5)- 0.71413649331646789076*(Pr-0.5)); *blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*(Pb-0.5)+ 2.1453384174593273e-06*(Pr-0.5)); } static void ConvertYCbCrToRGB(const double Y,const double Cb, const double Cr,double *red,double *green,double *blue) { ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue); } static void ConvertYIQToRGB(const double Y,const double I,const double Q, double *red,double *green,double *blue) { *red=QuantumRange*(Y+0.9562957197589482261*(I-0.5)+0.6210244164652610754* (Q-0.5)); *green=QuantumRange*(Y-0.2721220993185104464*(I-0.5)-0.6473805968256950427* (Q-0.5)); *blue=QuantumRange*(Y-1.1069890167364901945*(I-0.5)+1.7046149983646481374* (Q-0.5)); } static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr, double *red,double *green,double *blue) { *red=QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)- 0.52591263066186533*(Dr-0.5)); *green=QuantumRange*(Y-0.12913289889050927*(Db-0.5)+ 0.26789932820759876*(Dr-0.5)); *blue=QuantumRange*(Y+0.66467905997895482*(Db-0.5)- 7.9202543533108e-05*(Dr-0.5)); } static void ConvertYUVToRGB(const double Y,const double U,const double V, double *red,double *green,double *blue) { *red=QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+1.1398279671717170825* (V-0.5)); *green=QuantumRange*(Y-0.3946101641414141437*(U-0.5)-0.5805003156565656797* (V-0.5)); *blue=QuantumRange*(Y+2.0319996843434342537*(U-0.5)-4.813762626262513e-04* (V-0.5)); } static MagickBooleanType TransformsRGBImage(Image *image, ExceptionInfo *exception) { #define TransformsRGBImageTag "Transform/Image" static const float YCCMap[1389] = { 0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 
0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f, 
0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 
0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f, 0.410663f, 0.411383f, 
0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f, 0.541787f, 
0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 
0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 
0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f, 
0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000f }; CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; progress=0; switch (image->colorspace) { case CMYKColorspace: { PixelInfo zero; /* Transform image from CMYK to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertCMYKToRGB(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: { /* Transform linear GRAY to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=(MagickRealType) GetPixelGray(image,q); if ((image->intensity == Rec601LuminancePixelIntensityMethod) || (image->intensity == Rec709LuminancePixelIntensityMethod)) gray=EncodePixelGamma(gray); SetPixelRed(image,ClampToQuantum(gray),q); SetPixelGreen(image,ClampToQuantum(gray),q); SetPixelBlue(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from source colorspace to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; X=QuantumScale*GetPixelRed(image,q); Y=QuantumScale*GetPixelGreen(image,q); Z=QuantumScale*GetPixelBlue(image,q); switch (image->colorspace) { case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case LabColorspace: { ConvertLabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,&red,&green,&blue); break; } case xyYColorspace: { ConvertxyYToRGB(X,Y,Z,&red,&green,&blue); break; } case 
XYZColorspace: { ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: { red=QuantumRange*X; green=QuantumRange*Y; blue=QuantumRange*Z; break; } } SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/ film_gamma)-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum 
*) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))]; green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))]; blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))]; SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType) red)),q); SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType) green)),q); SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType) blue)),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); 
SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (image->colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) 0.0000000; z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) 0.0000000; } break; } default: { /* Linear conversion tables. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(image,q)); green=ScaleQuantumToMap(GetPixelGreen(image,q)); blue=ScaleQuantumToMap(GetPixelBlue(image,q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) 
ScaleMapToQuantum(pixel.blue); } SetPixelRed(image,ClampToQuantum(pixel.red),q); SetPixelGreen(image,ClampToQuantum(pixel.green),q); SetPixelBlue(image,ClampToQuantum(pixel.blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransformsRGBImage) #endif proceed=SetImageProgress(image,TransformsRGBImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* Convert PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; register size_t blue, green, red; red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=(double) ClampToQuantum(pixel.red); image->colormap[i].green=(double) ClampToQuantum(pixel.green); image->colormap[i].blue=(double) ClampToQuantum(pixel.blue); } (void) 
SyncImage(image,exception); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(MagickTrue); }
otfft_mixedradix.h
// Copyright (c) 2015, OK おじさん(岡久卓也) // Copyright (c) 2015, OK Ojisan(Takuya OKAHISA) // Copyright (c) 2017 to the present, DEWETRON GmbH // OTFFT Implementation Version 9.5 // based on Stockham FFT algorithm // from OK Ojisan(Takuya OKAHISA), source: http://www.moon.sannet.ne.jp/okahisa/stockham/stockham.html #pragma once #include "otfft_misc.h" #define _USE_MATH_DEFINES #include <math.h> namespace OTFFT_NAMESPACE { namespace OTFFT_MixedRadix { ////////////////////////////////////////////////// using namespace OTFFT; using namespace OTFFT_MISC; static const int OMP_THRESHOLD = 1<<15; struct cpx { xmm z; cpx(const xmm& z) noexcept : z(z) {} operator xmm() const noexcept { return z; } }; static inline cpx operator+(const cpx &a, const cpx &b) noexcept { return addpz(a, b); } static inline cpx operator*(const cpx &a, const cpx &b) noexcept { return mulpz(a, b); } /////////////////////////////////////////////////////////////////////////////// // Forward Butterfly Operation /////////////////////////////////////////////////////////////////////////////// void fwdend2(const int s, const bool eo, complex_vector x, complex_vector y) noexcept { complex_vector z = eo ? 
y : x; if (s >= 2) { for (int q = 0; q < s; q += 2) { complex_vector xq = x + q; complex_vector zq = z + q; const ymm a = getpz2(xq+0); const ymm b = getpz2(xq+s); setpz2(zq+0, addpz2(a, b)); setpz2(zq+s, subpz2(a, b)); } } else { const xmm a = getpz(x[0]); const xmm b = getpz(x[1]); setpz(z[0], addpz(a, b)); setpz(z[1], subpz(a, b)); } } void fwdcore2(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int m = n/2; const int N = n*s; const int N0 = 0; const int N1 = N/2; if (s >= 2) { for (int p = 0; p < m; p++) { const int sp = s*p; const int s2p = 2*sp; const ymm wp = duppz3(W[sp]); for (int q = 0; q < s; q += 2) { complex_vector xq_sp = x + q + sp; complex_vector yq_s2p = y + q + s2p; const ymm a = getpz2(xq_sp+N0); const ymm b = getpz2(xq_sp+N1); setpz2(yq_s2p+s*0, addpz2(a, b)); setpz2(yq_s2p+s*1, mulpz2(wp, subpz2(a, b))); } } } else { for (int p = 0; p < m; p++) { complex_vector x_p = x + p; complex_vector y_2p = y + 2*p; const xmm wp = getpz(W[p]); const xmm a = getpz(x_p[N0]); const xmm b = getpz(x_p[N1]); setpz(y_2p[0], addpz(a, b)); setpz(y_2p[1], mulpz(wp, subpz(a, b))); } } } /////////////////////////////////////////////////////////////////////////////// void fwdend4(const int s, const bool eo, complex_vector x, complex_vector y) noexcept { complex_vector z = eo ? 
y : x; if (s >= 2) { for (int q = 0; q < s; q += 2) { complex_vector xq = x + q; complex_vector zq = z + q; const ymm a = getpz2(xq+s*0); const ymm b = getpz2(xq+s*1); const ymm c = getpz2(xq+s*2); const ymm d = getpz2(xq+s*3); const ymm apc = addpz2(a, c); const ymm amc = subpz2(a, c); const ymm bpd = addpz2(b, d); const ymm jbmd = jxpz2(subpz2(b, d)); setpz2(zq+s*0, addpz2(apc, bpd)); setpz2(zq+s*1, subpz2(amc, jbmd)); setpz2(zq+s*2, subpz2(apc, bpd)); setpz2(zq+s*3, addpz2(amc, jbmd)); } } else { const xmm a = getpz(x[0]); const xmm b = getpz(x[1]); const xmm c = getpz(x[2]); const xmm d = getpz(x[3]); const xmm apc = addpz(a, c); const xmm amc = subpz(a, c); const xmm bpd = addpz(b, d); const xmm jbmd = jxpz(subpz(b, d)); setpz(z[0], addpz(apc, bpd)); setpz(z[1], subpz(amc, jbmd)); setpz(z[2], subpz(apc, bpd)); setpz(z[3], addpz(amc, jbmd)); } } void fwdcore4(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int m = n/4; const int N = n*s; const int N0 = 0; const int N1 = N/4; const int N2 = N1*2; const int N3 = N1*3; if (s >= 2) { for (int p = 0; p < m; p++) { const int sp = s*p; const int s4p = 4*sp; const ymm w1p = duppz3(W[1*sp]); const ymm w2p = duppz3(W[2*sp]); const ymm w3p = duppz3(W[3*sp]); for (int q = 0; q < s; q += 2) { complex_vector xq_sp = x + q + sp; complex_vector yq_s4p = y + q + s4p; const ymm a = getpz2(xq_sp+N0); const ymm b = getpz2(xq_sp+N1); const ymm c = getpz2(xq_sp+N2); const ymm d = getpz2(xq_sp+N3); const ymm apc = addpz2(a, c); const ymm amc = subpz2(a, c); const ymm bpd = addpz2(b, d); const ymm jbmd = jxpz2(subpz2(b, d)); setpz2(yq_s4p+s*0, addpz2(apc, bpd)); setpz2(yq_s4p+s*1, mulpz2(w1p, subpz2(amc, jbmd))); setpz2(yq_s4p+s*2, mulpz2(w2p, subpz2(apc, bpd))); setpz2(yq_s4p+s*3, mulpz2(w3p, addpz2(amc, jbmd))); } } } else { for (int p = 0; p < m; p++) { complex_vector x_p = x + p; complex_vector y_4p = y + 4*p; const xmm w1p = getpz(W[p]); const xmm w2p = mulpz(w1p,w1p); const 
xmm w3p = mulpz(w1p,w2p); const xmm a = getpz(x_p[N0]); const xmm b = getpz(x_p[N1]); const xmm c = getpz(x_p[N2]); const xmm d = getpz(x_p[N3]); const xmm apc = addpz(a, c); const xmm amc = subpz(a, c); const xmm bpd = addpz(b, d); const xmm jbmd = jxpz(subpz(b, d)); setpz(y_4p[0], addpz(apc, bpd)); setpz(y_4p[1], mulpz(w1p, subpz(amc, jbmd))); setpz(y_4p[2], mulpz(w2p, subpz(apc, bpd))); setpz(y_4p[3], mulpz(w3p, addpz(amc, jbmd))); } } } /////////////////////////////////////////////////////////////////////////////// void fwdend8(const int s, const bool eo, complex_vector x, complex_vector y) noexcept { complex_vector z = eo ? y : x; if (s >= 2) { for (int q = 0; q < s; q += 2) { complex_vector xq = x + q; complex_vector zq = z + q; const ymm x0 = getpz2(xq+s*0); const ymm x1 = getpz2(xq+s*1); const ymm x2 = getpz2(xq+s*2); const ymm x3 = getpz2(xq+s*3); const ymm x4 = getpz2(xq+s*4); const ymm x5 = getpz2(xq+s*5); const ymm x6 = getpz2(xq+s*6); const ymm x7 = getpz2(xq+s*7); const ymm a04 = addpz2(x0, x4); const ymm s04 = subpz2(x0, x4); const ymm a26 = addpz2(x2, x6); const ymm js26 = jxpz2(subpz2(x2, x6)); const ymm a15 = addpz2(x1, x5); const ymm s15 = subpz2(x1, x5); const ymm a37 = addpz2(x3, x7); const ymm js37 = jxpz2(subpz2(x3, x7)); const ymm a04_p1_a26 = addpz2(a04, a26); const ymm s04_mj_s26 = subpz2(s04, js26); const ymm a04_m1_a26 = subpz2(a04, a26); const ymm s04_pj_s26 = addpz2(s04, js26); const ymm a15_p1_a37 = addpz2(a15, a37); const ymm w8_s15_mj_s37 = w8xpz2(subpz2(s15, js37)); const ymm j_a15_m1_a37 = jxpz2(subpz2(a15, a37)); const ymm v8_s15_pj_s37 = v8xpz2(addpz2(s15, js37)); setpz2(zq+s*0, addpz2(a04_p1_a26, a15_p1_a37)); setpz2(zq+s*1, addpz2(s04_mj_s26, w8_s15_mj_s37)); setpz2(zq+s*2, subpz2(a04_m1_a26, j_a15_m1_a37)); setpz2(zq+s*3, subpz2(s04_pj_s26, v8_s15_pj_s37)); setpz2(zq+s*4, subpz2(a04_p1_a26, a15_p1_a37)); setpz2(zq+s*5, subpz2(s04_mj_s26, w8_s15_mj_s37)); setpz2(zq+s*6, addpz2(a04_m1_a26, j_a15_m1_a37)); setpz2(zq+s*7, 
addpz2(s04_pj_s26, v8_s15_pj_s37)); } } else { const xmm x0 = getpz(x[0]); const xmm x1 = getpz(x[1]); const xmm x2 = getpz(x[2]); const xmm x3 = getpz(x[3]); const xmm x4 = getpz(x[4]); const xmm x5 = getpz(x[5]); const xmm x6 = getpz(x[6]); const xmm x7 = getpz(x[7]); const xmm a04 = addpz(x0, x4); const xmm s04 = subpz(x0, x4); const xmm a26 = addpz(x2, x6); const xmm js26 = jxpz(subpz(x2, x6)); const xmm a15 = addpz(x1, x5); const xmm s15 = subpz(x1, x5); const xmm a37 = addpz(x3, x7); const xmm js37 = jxpz(subpz(x3, x7)); const xmm a04_p1_a26 = addpz(a04, a26); const xmm s04_mj_s26 = subpz(s04, js26); const xmm a04_m1_a26 = subpz(a04, a26); const xmm s04_pj_s26 = addpz(s04, js26); const xmm a15_p1_a37 = addpz(a15, a37); const xmm w8_s15_mj_s37 = w8xpz(subpz(s15, js37)); const xmm j_a15_m1_a37 = jxpz(subpz(a15, a37)); const xmm v8_s15_pj_s37 = v8xpz(addpz(s15, js37)); setpz(z[0], addpz(a04_p1_a26, a15_p1_a37)); setpz(z[1], addpz(s04_mj_s26, w8_s15_mj_s37)); setpz(z[2], subpz(a04_m1_a26, j_a15_m1_a37)); setpz(z[3], subpz(s04_pj_s26, v8_s15_pj_s37)); setpz(z[4], subpz(a04_p1_a26, a15_p1_a37)); setpz(z[5], subpz(s04_mj_s26, w8_s15_mj_s37)); setpz(z[6], addpz(a04_m1_a26, j_a15_m1_a37)); setpz(z[7], addpz(s04_pj_s26, v8_s15_pj_s37)); } } void fwdcore8(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int m = n/8; const int N = n*s; const int N0 = 0; const int N1 = N/8; const int N2 = N1*2; const int N3 = N1*3; const int N4 = N1*4; const int N5 = N1*5; const int N6 = N1*6; const int N7 = N1*7; if (s >= 2) { for (int p = 0; p < m; p++) { const int sp = s*p; const int s8p = 8*sp; const ymm w1p = duppz3(W[1*sp]); const ymm w2p = duppz3(W[2*sp]); const ymm w3p = duppz3(W[3*sp]); const ymm w4p = mulpz2(w2p,w2p); const ymm w5p = mulpz2(w2p,w3p); const ymm w6p = mulpz2(w3p,w3p); const ymm w7p = mulpz2(w3p,w4p); for (int q = 0; q < s; q += 2) { complex_vector xq_sp = x + q + sp; complex_vector yq_s8p = y + q + s8p; const 
ymm x0 = getpz2(xq_sp+N0); const ymm x1 = getpz2(xq_sp+N1); const ymm x2 = getpz2(xq_sp+N2); const ymm x3 = getpz2(xq_sp+N3); const ymm x4 = getpz2(xq_sp+N4); const ymm x5 = getpz2(xq_sp+N5); const ymm x6 = getpz2(xq_sp+N6); const ymm x7 = getpz2(xq_sp+N7); const ymm a04 = addpz2(x0, x4); const ymm s04 = subpz2(x0, x4); const ymm a26 = addpz2(x2, x6); const ymm js26 = jxpz2(subpz2(x2, x6)); const ymm a15 = addpz2(x1, x5); const ymm s15 = subpz2(x1, x5); const ymm a37 = addpz2(x3, x7); const ymm js37 = jxpz2(subpz2(x3, x7)); const ymm a04_p1_a26 = addpz2(a04, a26); const ymm s04_mj_s26 = subpz2(s04, js26); const ymm a04_m1_a26 = subpz2(a04, a26); const ymm s04_pj_s26 = addpz2(s04, js26); const ymm a15_p1_a37 = addpz2(a15, a37); const ymm w8_s15_mj_s37 = w8xpz2(subpz2(s15, js37)); const ymm j_a15_m1_a37 = jxpz2(subpz2(a15, a37)); const ymm v8_s15_pj_s37 = v8xpz2(addpz2(s15, js37)); setpz2(yq_s8p+s*0, addpz2(a04_p1_a26, a15_p1_a37)); setpz2(yq_s8p+s*1, mulpz2(w1p, addpz2(s04_mj_s26, w8_s15_mj_s37))); setpz2(yq_s8p+s*2, mulpz2(w2p, subpz2(a04_m1_a26, j_a15_m1_a37))); setpz2(yq_s8p+s*3, mulpz2(w3p, subpz2(s04_pj_s26, v8_s15_pj_s37))); setpz2(yq_s8p+s*4, mulpz2(w4p, subpz2(a04_p1_a26, a15_p1_a37))); setpz2(yq_s8p+s*5, mulpz2(w5p, subpz2(s04_mj_s26, w8_s15_mj_s37))); setpz2(yq_s8p+s*6, mulpz2(w6p, addpz2(a04_m1_a26, j_a15_m1_a37))); setpz2(yq_s8p+s*7, mulpz2(w7p, addpz2(s04_pj_s26, v8_s15_pj_s37))); } } } else { for (int p = 0; p < m; p++) { complex_vector x_p = x + p; complex_vector y_8p = y + 8*p; const xmm w1p = getpz(W[p]); const xmm w2p = mulpz(w1p,w1p); const xmm w3p = mulpz(w1p,w2p); const xmm w4p = mulpz(w2p,w2p); const xmm w5p = mulpz(w2p,w3p); const xmm w6p = mulpz(w3p,w3p); const xmm w7p = mulpz(w3p,w4p); const xmm x0 = getpz(x_p[N0]); const xmm x1 = getpz(x_p[N1]); const xmm x2 = getpz(x_p[N2]); const xmm x3 = getpz(x_p[N3]); const xmm x4 = getpz(x_p[N4]); const xmm x5 = getpz(x_p[N5]); const xmm x6 = getpz(x_p[N6]); const xmm x7 = getpz(x_p[N7]); const xmm 
a04 = addpz(x0, x4); const xmm s04 = subpz(x0, x4); const xmm a26 = addpz(x2, x6); const xmm js26 = jxpz(subpz(x2, x6)); const xmm a15 = addpz(x1, x5); const xmm s15 = subpz(x1, x5); const xmm a37 = addpz(x3, x7); const xmm js37 = jxpz(subpz(x3, x7)); const xmm a04_p1_a26 = addpz(a04, a26); const xmm s04_mj_s26 = subpz(s04, js26); const xmm a04_m1_a26 = subpz(a04, a26); const xmm s04_pj_s26 = addpz(s04, js26); const xmm a15_p1_a37 = addpz(a15, a37); const xmm w8_s15_mj_s37 = w8xpz(subpz(s15, js37)); const xmm j_a15_m1_a37 = jxpz(subpz(a15, a37)); const xmm v8_s15_pj_s37 = v8xpz(addpz(s15, js37)); setpz(y_8p[0], addpz(a04_p1_a26, a15_p1_a37)); setpz(y_8p[1], mulpz(w1p, addpz(s04_mj_s26, w8_s15_mj_s37))); setpz(y_8p[2], mulpz(w2p, subpz(a04_m1_a26, j_a15_m1_a37))); setpz(y_8p[3], mulpz(w3p, subpz(s04_pj_s26, v8_s15_pj_s37))); setpz(y_8p[4], mulpz(w4p, subpz(a04_p1_a26, a15_p1_a37))); setpz(y_8p[5], mulpz(w5p, subpz(s04_mj_s26, w8_s15_mj_s37))); setpz(y_8p[6], mulpz(w6p, addpz(a04_m1_a26, j_a15_m1_a37))); setpz(y_8p[7], mulpz(w7p, addpz(s04_pj_s26, v8_s15_pj_s37))); } } } /////////////////////////////////////////////////////////////////////////////// void fwdend5(const int s, const bool eo, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const cpx w1 = getpz(W[s]); const cpx w2 = mulpz(w1,w1); const cpx w3 = mulpz(w1,w2); const cpx w4 = mulpz(w2,w2); complex_vector z = eo ? 
y : x; for (int q = 0; q < s; q++) { const cpx a = getpz(x[q+s*0]); const cpx b = getpz(x[q+s*1]); const cpx c = getpz(x[q+s*2]); const cpx d = getpz(x[q+s*3]); const cpx e = getpz(x[q+s*4]); setpz(z[q+s*0], a + b + c + d + e); setpz(z[q+s*1], a + w1*b + w2*c + w3*d + w4*e); setpz(z[q+s*2], a + w2*b + w4*c + w1*d + w3*e); setpz(z[q+s*3], a + w3*b + w1*c + w4*d + w2*e); setpz(z[q+s*4], a + w4*b + w3*c + w2*d + w1*e); } } void fwdcore5(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int N = n*s; const int m = n/5; const int N0 = 0; const int N1 = N/5; const int N2 = N1*2; const int N3 = N1*3; const int N4 = N1*4; const cpx w1 = getpz(W[N1]); const cpx w2 = mulpz(w1,w1); const cpx w3 = mulpz(w1,w2); const cpx w4 = mulpz(w2,w2); for (int p = 0; p < m; p++) { const int sp = s*p; const cpx w1p = getpz(W[sp]); const cpx w2p = mulpz(w1p,w1p); const cpx w3p = mulpz(w1p,w2p); const cpx w4p = mulpz(w2p,w2p); for (int q = 0; q < s; q++) { const int q_sp = q + sp; const cpx a = getpz(x[q_sp+N0]); const cpx b = getpz(x[q_sp+N1]); const cpx c = getpz(x[q_sp+N2]); const cpx d = getpz(x[q_sp+N3]); const cpx e = getpz(x[q_sp+N4]); const int q_s5p = q + sp*5; setpz(y[q_s5p+s*0], a + b + c + d + e); setpz(y[q_s5p+s*1], (a + w1*b + w2*c + w3*d + w4*e)*w1p); setpz(y[q_s5p+s*2], (a + w2*b + w4*c + w1*d + w3*e)*w2p); setpz(y[q_s5p+s*3], (a + w3*b + w1*c + w4*d + w2*e)*w3p); setpz(y[q_s5p+s*4], (a + w4*b + w3*c + w2*d + w1*e)*w4p); } } } /////////////////////////////////////////////////////////////////////////////// void fwdend3(const int s, const bool eo, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const cpx w1 = getpz(W[s]); const cpx w2 = mulpz(w1,w1); complex_vector z = eo ? 
y : x; for (int q = 0; q < s; q++) { const cpx a = getpz(x[q+s*0]); const cpx b = getpz(x[q+s*1]); const cpx c = getpz(x[q+s*2]); setpz(z[q+s*0], a + b + c); setpz(z[q+s*1], a + w1*b + w2*c); setpz(z[q+s*2], a + w2*b + w1*c); } } void fwdcore3(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int N = n*s; const int m = n/3; const int N0 = 0; const int N1 = N/3; const int N2 = N1*2; const cpx w1 = getpz(W[N1]); const cpx w2 = mulpz(w1,w1); for (int p = 0; p < m; p++) { const int sp = s*p; const cpx w1p = getpz(W[sp]); const cpx w2p = mulpz(w1p,w1p); for (int q = 0; q < s; q++) { const int q_sp = q + sp; const cpx a = getpz(x[q_sp+N0]); const cpx b = getpz(x[q_sp+N1]); const cpx c = getpz(x[q_sp+N2]); const int q_s3p = q + sp*3; setpz(y[q_s3p+s*0], a + b + c); setpz(y[q_s3p+s*1], (a + w1*b + w2*c)*w1p); setpz(y[q_s3p+s*2], (a + w2*b + w1*c)*w2p); } } } /////////////////////////////////////////////////////////////////////////////// // Any Size FFT except Radix-2,3,5 /////////////////////////////////////////////////////////////////////////////// void fwdfftany(const int r, const int n, const int s, const bool eo, complex_vector x, complex_vector y, const_complex_vector W) noexcept { static const xmm zero = { 0, 0 }; const int N = n*s; int k = r; while (n%k != 0) { if (k*k > n) { k = n; break; } k += 2; } if (k == n) { for (int q = 0; q < s; q++) { for (int i = 0; i < k; i++) { cpx z = zero; for (int j = 0; j < k; j++) { const cpx a = getpz(x[q+s*j]); const cpx wij = getpz(W[s*((i*j)%k)]); z = z + a*wij; } setpz(y[q+s*i], z); } } if (!eo) for (int p = 0; p < N; p++) setpz(x[p], getpz(y[p])); } else { const int m = n/k; const int ms = m*s; for (int p = 0; p < m; p++) { const int sp = s*p; for (int q = 0; q < s; q++) { const int q_sp = q + sp; const int q_spk = q + sp*k; for (int i = 0; i < k; i++) { cpx z = zero; for (int j = 0; j < k; j++) { const cpx a = getpz(x[q_sp+ms*j]); const cpx wij = getpz(W[ms*((i*j)%k)]); z 
= z + a*wij; } const cpx wip = getpz(W[i*sp]); setpz(y[q_spk+s*i], z * wip); } } } fwdfftany(k, m, k*s, !eo, y, x, W); } } /////////////////////////////////////////////////////////////////////////////// // Mixed Radix FFT /////////////////////////////////////////////////////////////////////////////// void fwdfft(const int n, const int s, const bool eo, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int N = n*s; if (N < 2) return; if (n%8 == 0) { if (n == 8) fwdend8(s, eo, x, y); else { fwdcore8(n, s, x, y, W); fwdfft(n/8, 8*s, !eo, y, x, W); } } else if (n%4 == 0) { if (n == 4) fwdend4(s, eo, x, y); else { fwdcore4(n, s, x, y, W); fwdfft(n/4, 4*s, !eo, y, x, W); } } else if (n%2 == 0) { if (n == 2) fwdend2(s, eo, x, y); else { fwdcore2(n, s, x, y, W); fwdfft(n/2, 2*s, !eo, y, x, W); } } else if (n%5 == 0) { if (n == 5) fwdend5(s, eo, x, y, W); else { fwdcore5(n, s, x, y, W); fwdfft(n/5, 5*s, !eo, y, x, W); } } else if (n%3 == 0) { if (n == 3) fwdend3(s, eo, x, y, W); else { fwdcore3(n, s, x, y, W); fwdfft(n/3, 3*s, !eo, y, x, W); } } else fwdfftany(7, n, s, eo, x, y, W); } /////////////////////////////////////////////////////////////////////////////// // Inverse Butterfly Operation /////////////////////////////////////////////////////////////////////////////// void invend2(const int s, const bool eo, complex_vector x, complex_vector y) noexcept { complex_vector z = eo ? 
y : x; if (s >= 2) { for (int q = 0; q < s; q += 2) { complex_vector xq = x + q; complex_vector zq = z + q; const ymm a = getpz2(xq+0); const ymm b = getpz2(xq+s); setpz2(zq+0, addpz2(a, b)); setpz2(zq+s, subpz2(a, b)); } } else { const xmm a = getpz(x[0]); const xmm b = getpz(x[1]); setpz(z[0], addpz(a, b)); setpz(z[1], subpz(a, b)); } } void invcore2(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int m = n/2; const int N = n*s; const int N0 = 0; const int N1 = N/2; if (s >= 2) { for (int p = 0; p < m; p++) { const int sp = s*p; const int s2p = 2*sp; const ymm wp = duppz3(W[N-sp]); for (int q = 0; q < s; q += 2) { complex_vector xq_sp = x + q + sp; complex_vector yq_s2p = y + q + s2p; const ymm a = getpz2(xq_sp+N0); const ymm b = getpz2(xq_sp+N1); setpz2(yq_s2p+s*0, addpz2(a, b)); setpz2(yq_s2p+s*1, mulpz2(wp, subpz2(a, b))); } } } else { for (int p = 0; p < m; p++) { complex_vector x_p = x + p; complex_vector y_2p = y + 2*p; const xmm wp = getpz(W[N-p]); const xmm a = getpz(x_p[N0]); const xmm b = getpz(x_p[N1]); setpz(y_2p[0], addpz(a, b)); setpz(y_2p[1], mulpz(wp, subpz(a, b))); } } } /////////////////////////////////////////////////////////////////////////////// void invend4(const int s, const bool eo, complex_vector x, complex_vector y) noexcept { complex_vector z = eo ? 
y : x; if (s >= 2) { for (int q = 0; q < s; q += 2) { complex_vector xq = x + q; complex_vector zq = z + q; const ymm a = getpz2(xq+s*0); const ymm b = getpz2(xq+s*1); const ymm c = getpz2(xq+s*2); const ymm d = getpz2(xq+s*3); const ymm apc = addpz2(a, c); const ymm amc = subpz2(a, c); const ymm bpd = addpz2(b, d); const ymm jbmd = jxpz2(subpz2(b, d)); setpz2(zq+s*0, addpz2(apc, bpd)); setpz2(zq+s*1, addpz2(amc, jbmd)); setpz2(zq+s*2, subpz2(apc, bpd)); setpz2(zq+s*3, subpz2(amc, jbmd)); } } else { const xmm a = getpz(x[0]); const xmm b = getpz(x[1]); const xmm c = getpz(x[2]); const xmm d = getpz(x[3]); const xmm apc = addpz(a, c); const xmm amc = subpz(a, c); const xmm bpd = addpz(b, d); const xmm jbmd = jxpz(subpz(b, d)); setpz(z[0], addpz(apc, bpd)); setpz(z[1], addpz(amc, jbmd)); setpz(z[2], subpz(apc, bpd)); setpz(z[3], subpz(amc, jbmd)); } } void invcore4(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int m = n/4; const int N = n*s; const int N0 = 0; const int N1 = N/4; const int N2 = N1*2; const int N3 = N1*3; if (s >= 2) { for (int p = 0; p < m; p++) { const int sp = s*p; const int s4p = 4*sp; const ymm w1p = duppz3(W[N-1*sp]); const ymm w2p = duppz3(W[N-2*sp]); const ymm w3p = duppz3(W[N-3*sp]); for (int q = 0; q < s; q += 2) { complex_vector xq_sp = x + q + sp; complex_vector yq_s4p = y + q + s4p; const ymm a = getpz2(xq_sp+N0); const ymm b = getpz2(xq_sp+N1); const ymm c = getpz2(xq_sp+N2); const ymm d = getpz2(xq_sp+N3); const ymm apc = addpz2(a, c); const ymm amc = subpz2(a, c); const ymm bpd = addpz2(b, d); const ymm jbmd = jxpz2(subpz2(b, d)); setpz2(yq_s4p+s*0, addpz2(apc, bpd)); setpz2(yq_s4p+s*1, mulpz2(w1p, addpz2(amc, jbmd))); setpz2(yq_s4p+s*2, mulpz2(w2p, subpz2(apc, bpd))); setpz2(yq_s4p+s*3, mulpz2(w3p, subpz2(amc, jbmd))); } } } else { for (int p = 0; p < m; p++) { complex_vector x_p = x + p; complex_vector y_4p = y + 4*p; const xmm w1p = cnjpz(getpz(W[p])); const xmm w2p = 
mulpz(w1p,w1p); const xmm w3p = mulpz(w1p,w2p); const xmm a = getpz(x_p[N0]); const xmm b = getpz(x_p[N1]); const xmm c = getpz(x_p[N2]); const xmm d = getpz(x_p[N3]); const xmm apc = addpz(a, c); const xmm amc = subpz(a, c); const xmm bpd = addpz(b, d); const xmm jbmd = jxpz(subpz(b, d)); setpz(y_4p[0], addpz(apc, bpd)); setpz(y_4p[1], mulpz(w1p, addpz(amc, jbmd))); setpz(y_4p[2], mulpz(w2p, subpz(apc, bpd))); setpz(y_4p[3], mulpz(w3p, subpz(amc, jbmd))); } } } /////////////////////////////////////////////////////////////////////////////// void invend8(const int s, const bool eo, complex_vector x, complex_vector y) noexcept { complex_vector z = eo ? y : x; if (s >= 2) { for (int q = 0; q < s; q += 2) { complex_vector xq = x + q; complex_vector zq = z + q; const ymm x0 = getpz2(xq+s*0); const ymm x1 = getpz2(xq+s*1); const ymm x2 = getpz2(xq+s*2); const ymm x3 = getpz2(xq+s*3); const ymm x4 = getpz2(xq+s*4); const ymm x5 = getpz2(xq+s*5); const ymm x6 = getpz2(xq+s*6); const ymm x7 = getpz2(xq+s*7); const ymm a04 = addpz2(x0, x4); const ymm s04 = subpz2(x0, x4); const ymm a26 = addpz2(x2, x6); const ymm js26 = jxpz2(subpz2(x2, x6)); const ymm a15 = addpz2(x1, x5); const ymm s15 = subpz2(x1, x5); const ymm a37 = addpz2(x3, x7); const ymm js37 = jxpz2(subpz2(x3, x7)); const ymm a04_p1_a26 = addpz2(a04, a26); const ymm s04_pj_s26 = addpz2(s04, js26); const ymm a04_m1_a26 = subpz2(a04, a26); const ymm s04_mj_s26 = subpz2(s04, js26); const ymm a15_p1_a37 = addpz2(a15, a37); const ymm v8_s15_pj_s37 = v8xpz2(addpz2(s15, js37)); const ymm j_a15_m1_a37 = jxpz2(subpz2(a15, a37)); const ymm w8_s15_mj_s37 = w8xpz2(subpz2(s15, js37)); setpz2(zq+s*0, addpz2(a04_p1_a26, a15_p1_a37)); setpz2(zq+s*1, addpz2(s04_pj_s26, v8_s15_pj_s37)); setpz2(zq+s*2, addpz2(a04_m1_a26, j_a15_m1_a37)); setpz2(zq+s*3, subpz2(s04_mj_s26, w8_s15_mj_s37)); setpz2(zq+s*4, subpz2(a04_p1_a26, a15_p1_a37)); setpz2(zq+s*5, subpz2(s04_pj_s26, v8_s15_pj_s37)); setpz2(zq+s*6, subpz2(a04_m1_a26, j_a15_m1_a37)); 
setpz2(zq+s*7, addpz2(s04_mj_s26, w8_s15_mj_s37)); } } else { const xmm x0 = getpz(x[0]); const xmm x1 = getpz(x[1]); const xmm x2 = getpz(x[2]); const xmm x3 = getpz(x[3]); const xmm x4 = getpz(x[4]); const xmm x5 = getpz(x[5]); const xmm x6 = getpz(x[6]); const xmm x7 = getpz(x[7]); const xmm a04 = addpz(x0, x4); const xmm s04 = subpz(x0, x4); const xmm a26 = addpz(x2, x6); const xmm js26 = jxpz(subpz(x2, x6)); const xmm a15 = addpz(x1, x5); const xmm s15 = subpz(x1, x5); const xmm a37 = addpz(x3, x7); const xmm js37 = jxpz(subpz(x3, x7)); const xmm a04_p1_a26 = addpz(a04, a26); const xmm s04_pj_s26 = addpz(s04, js26); const xmm a04_m1_a26 = subpz(a04, a26); const xmm s04_mj_s26 = subpz(s04, js26); const xmm a15_p1_a37 = addpz(a15, a37); const xmm v8_s15_pj_s37 = v8xpz(addpz(s15, js37)); const xmm j_a15_m1_a37 = jxpz(subpz(a15, a37)); const xmm w8_s15_mj_s37 = w8xpz(subpz(s15, js37)); setpz(z[0], addpz(a04_p1_a26, a15_p1_a37)); setpz(z[1], addpz(s04_pj_s26, v8_s15_pj_s37)); setpz(z[2], addpz(a04_m1_a26, j_a15_m1_a37)); setpz(z[3], subpz(s04_mj_s26, w8_s15_mj_s37)); setpz(z[4], subpz(a04_p1_a26, a15_p1_a37)); setpz(z[5], subpz(s04_pj_s26, v8_s15_pj_s37)); setpz(z[6], subpz(a04_m1_a26, j_a15_m1_a37)); setpz(z[7], addpz(s04_mj_s26, w8_s15_mj_s37)); } } void invcore8(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int m = n/8; const int N = n*s; const int N0 = 0; const int N1 = N/8; const int N2 = N1*2; const int N3 = N1*3; const int N4 = N1*4; const int N5 = N1*5; const int N6 = N1*6; const int N7 = N1*7; if (s >= 2) { for (int p = 0; p < m; p++) { const int sp = s*p; const int s8p = 8*sp; const ymm w1p = duppz3(W[N-1*sp]); const ymm w2p = duppz3(W[N-2*sp]); const ymm w3p = duppz3(W[N-3*sp]); const ymm w4p = mulpz2(w2p,w2p); const ymm w5p = mulpz2(w2p,w3p); const ymm w6p = mulpz2(w3p,w3p); const ymm w7p = mulpz2(w3p,w4p); for (int q = 0; q < s; q += 2) { complex_vector xq_sp = x + q + sp; complex_vector yq_s8p = 
y + q + s8p; const ymm x0 = getpz2(xq_sp+N0); const ymm x1 = getpz2(xq_sp+N1); const ymm x2 = getpz2(xq_sp+N2); const ymm x3 = getpz2(xq_sp+N3); const ymm x4 = getpz2(xq_sp+N4); const ymm x5 = getpz2(xq_sp+N5); const ymm x6 = getpz2(xq_sp+N6); const ymm x7 = getpz2(xq_sp+N7); const ymm a04 = addpz2(x0, x4); const ymm s04 = subpz2(x0, x4); const ymm a26 = addpz2(x2, x6); const ymm js26 = jxpz2(subpz2(x2, x6)); const ymm a15 = addpz2(x1, x5); const ymm s15 = subpz2(x1, x5); const ymm a37 = addpz2(x3, x7); const ymm js37 = jxpz2(subpz2(x3, x7)); const ymm a04_p1_a26 = addpz2(a04, a26); const ymm s04_pj_s26 = addpz2(s04, js26); const ymm a04_m1_a26 = subpz2(a04, a26); const ymm s04_mj_s26 = subpz2(s04, js26); const ymm a15_p1_a37 = addpz2(a15, a37); const ymm v8_s15_pj_s37 = v8xpz2(addpz2(s15, js37)); const ymm j_a15_m1_a37 = jxpz2(subpz2(a15, a37)); const ymm w8_s15_mj_s37 = w8xpz2(subpz2(s15, js37)); setpz2(yq_s8p+s*0, addpz2(a04_p1_a26, a15_p1_a37)); setpz2(yq_s8p+s*1, mulpz2(w1p, addpz2(s04_pj_s26, v8_s15_pj_s37))); setpz2(yq_s8p+s*2, mulpz2(w2p, addpz2(a04_m1_a26, j_a15_m1_a37))); setpz2(yq_s8p+s*3, mulpz2(w3p, subpz2(s04_mj_s26, w8_s15_mj_s37))); setpz2(yq_s8p+s*4, mulpz2(w4p, subpz2(a04_p1_a26, a15_p1_a37))); setpz2(yq_s8p+s*5, mulpz2(w5p, subpz2(s04_pj_s26, v8_s15_pj_s37))); setpz2(yq_s8p+s*6, mulpz2(w6p, subpz2(a04_m1_a26, j_a15_m1_a37))); setpz2(yq_s8p+s*7, mulpz2(w7p, addpz2(s04_mj_s26, w8_s15_mj_s37))); } } } else { for (int p = 0; p < m; p++) { complex_vector x_p = x + p; complex_vector y_8p = y + 8*p; const xmm w1p = cnjpz(getpz(W[p])); const xmm w2p = mulpz(w1p,w1p); const xmm w3p = mulpz(w1p,w2p); const xmm w4p = mulpz(w2p,w2p); const xmm w5p = mulpz(w2p,w3p); const xmm w6p = mulpz(w3p,w3p); const xmm w7p = mulpz(w3p,w4p); const xmm x0 = getpz(x_p[N0]); const xmm x1 = getpz(x_p[N1]); const xmm x2 = getpz(x_p[N2]); const xmm x3 = getpz(x_p[N3]); const xmm x4 = getpz(x_p[N4]); const xmm x5 = getpz(x_p[N5]); const xmm x6 = getpz(x_p[N6]); const xmm x7 = 
getpz(x_p[N7]); const xmm a04 = addpz(x0, x4); const xmm s04 = subpz(x0, x4); const xmm a26 = addpz(x2, x6); const xmm js26 = jxpz(subpz(x2, x6)); const xmm a15 = addpz(x1, x5); const xmm s15 = subpz(x1, x5); const xmm a37 = addpz(x3, x7); const xmm js37 = jxpz(subpz(x3, x7)); const xmm a04_p1_a26 = addpz(a04, a26); const xmm s04_pj_s26 = addpz(s04, js26); const xmm a04_m1_a26 = subpz(a04, a26); const xmm s04_mj_s26 = subpz(s04, js26); const xmm a15_p1_a37 = addpz(a15, a37); const xmm v8_s15_pj_s37 = v8xpz(addpz(s15, js37)); const xmm j_a15_m1_a37 = jxpz(subpz(a15, a37)); const xmm w8_s15_mj_s37 = w8xpz(subpz(s15, js37)); setpz(y_8p[0], addpz(a04_p1_a26, a15_p1_a37)); setpz(y_8p[1], mulpz(w1p, addpz(s04_pj_s26, v8_s15_pj_s37))); setpz(y_8p[2], mulpz(w2p, addpz(a04_m1_a26, j_a15_m1_a37))); setpz(y_8p[3], mulpz(w3p, subpz(s04_mj_s26, w8_s15_mj_s37))); setpz(y_8p[4], mulpz(w4p, subpz(a04_p1_a26, a15_p1_a37))); setpz(y_8p[5], mulpz(w5p, subpz(s04_pj_s26, v8_s15_pj_s37))); setpz(y_8p[6], mulpz(w6p, subpz(a04_m1_a26, j_a15_m1_a37))); setpz(y_8p[7], mulpz(w7p, addpz(s04_mj_s26, w8_s15_mj_s37))); } } } /////////////////////////////////////////////////////////////////////////////// void invend5(const int s, const bool eo, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const cpx w1 = getpz(W[4*s]); const cpx w2 = mulpz(w1,w1); const cpx w3 = mulpz(w1,w2); const cpx w4 = mulpz(w2,w2); complex_vector z = eo ? 
y : x; for (int q = 0; q < s; q++) { const cpx a = getpz(x[q+s*0]); const cpx b = getpz(x[q+s*1]); const cpx c = getpz(x[q+s*2]); const cpx d = getpz(x[q+s*3]); const cpx e = getpz(x[q+s*4]); setpz(z[q+s*0], a + b + c + d + e); setpz(z[q+s*1], a + w1*b + w2*c + w3*d + w4*e); setpz(z[q+s*2], a + w2*b + w4*c + w1*d + w3*e); setpz(z[q+s*3], a + w3*b + w1*c + w4*d + w2*e); setpz(z[q+s*4], a + w4*b + w3*c + w2*d + w1*e); } } void invcore5(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int N = n*s; const int m = n/5; const int N0 = 0; const int N1 = N/5; const int N2 = N1*2; const int N3 = N1*3; const int N4 = N1*4; const cpx w1 = getpz(W[N4]); const cpx w2 = mulpz(w1,w1); const cpx w3 = mulpz(w1,w2); const cpx w4 = mulpz(w2,w2); for (int p = 0; p < m; p++) { const int sp = s*p; const cpx w1p = getpz(W[N-sp]); const cpx w2p = mulpz(w1p,w1p); const cpx w3p = mulpz(w1p,w2p); const cpx w4p = mulpz(w2p,w2p); for (int q = 0; q < s; q++) { const int q_sp = q + sp; const cpx a = getpz(x[q_sp+N0]); const cpx b = getpz(x[q_sp+N1]); const cpx c = getpz(x[q_sp+N2]); const cpx d = getpz(x[q_sp+N3]); const cpx e = getpz(x[q_sp+N4]); const int q_s5p = q + sp*5; setpz(y[q_s5p+s*0], a + b + c + d + e); setpz(y[q_s5p+s*1], (a + w1*b + w2*c + w3*d + w4*e)*w1p); setpz(y[q_s5p+s*2], (a + w2*b + w4*c + w1*d + w3*e)*w2p); setpz(y[q_s5p+s*3], (a + w3*b + w1*c + w4*d + w2*e)*w3p); setpz(y[q_s5p+s*4], (a + w4*b + w3*c + w2*d + w1*e)*w4p); } } } /////////////////////////////////////////////////////////////////////////////// void invend3(const int s, const bool eo, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const cpx w1 = getpz(W[2*s]); const cpx w2 = mulpz(w1,w1); complex_vector z = eo ? 
y : x; for (int q = 0; q < s; q++) { const cpx a = getpz(x[q+s*0]); const cpx b = getpz(x[q+s*1]); const cpx c = getpz(x[q+s*2]); setpz(z[q+s*0], a + b + c); setpz(z[q+s*1], a + w1*b + w2*c); setpz(z[q+s*2], a + w2*b + w1*c); } } void invcore3(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int N = n*s; const int m = n/3; const int N0 = 0; const int N1 = N/3; const int N2 = N1*2; const cpx w1 = getpz(W[N2]); const cpx w2 = mulpz(w1,w1); for (int p = 0; p < m; p++) { const int sp = s*p; const cpx w1p = getpz(W[N-sp]); const cpx w2p = mulpz(w1p,w1p); for (int q = 0; q < s; q++) { const int q_sp = q + sp; const cpx a = getpz(x[q_sp+N0]); const cpx b = getpz(x[q_sp+N1]); const cpx c = getpz(x[q_sp+N2]); const int q_s3p = q + sp*3; setpz(y[q_s3p+s*0], a + b + c); setpz(y[q_s3p+s*1], (a + w1*b + w2*c)*w1p); setpz(y[q_s3p+s*2], (a + w2*b + w1*c)*w2p); } } } /////////////////////////////////////////////////////////////////////////////// // Any Size IFFT except Radix-2,3,5 /////////////////////////////////////////////////////////////////////////////// void invfftany(const int r, const int n, const int s, const bool eo, complex_vector x, complex_vector y, const_complex_vector W) noexcept { static const xmm zero = { 0, 0 }; const int N = n*s; int k = r; while (n%k != 0) { if (k*k > n) { k = n; break; } k += 2; } if (k == n) { for (int q = 0; q < s; q++) { for (int i = 0; i < k; i++) { cpx z = zero; for (int j = 0; j < k; j++) { const cpx a = getpz(x[q+s*j]); const cpx wij = getpz(W[N-s*((i*j)%k)]); z = z + a*wij; } setpz(y[q+s*i], z); } } if (!eo) for (int p = 0; p < N; p++) setpz(x[p], getpz(y[p])); } else { const int m = n/k; const int ms = m*s; for (int p = 0; p < m; p++) { const int sp = s*p; for (int q = 0; q < s; q++) { const int q_sp = q + sp; const int q_spk = q + sp*k; for (int i = 0; i < k; i++) { cpx z = zero; for (int j = 0; j < k; j++) { const cpx a = getpz(x[q_sp+ms*j]); const cpx wij = 
getpz(W[N-ms*((i*j)%k)]); z = z + a*wij; } const cpx wip = getpz(W[N-i*sp]); setpz(y[q_spk+s*i], z * wip); } } } invfftany(k, m, k*s, !eo, y, x, W); } } /////////////////////////////////////////////////////////////////////////////// // Mixed Radix IFFT /////////////////////////////////////////////////////////////////////////////// void invfft(const int n, const int s, const bool eo, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int N = n*s; if (N < 2) return; if (n%8 == 0) { if (n == 8) invend8(s, eo, x, y); else { invcore8(n, s, x, y, W); invfft(n/8, 8*s, !eo, y, x, W); } } else if (n%4 == 0) { if (n == 4) invend4(s, eo, x, y); else { invcore4(n, s, x, y, W); invfft(n/4, 4*s, !eo, y, x, W); } } else if (n%2 == 0) { if (n == 2) invend2(s, eo, x, y); else { invcore2(n, s, x, y, W); invfft(n/2, 2*s, !eo, y, x, W); } } else if (n%5 == 0) { if (n == 5) invend5(s, eo, x, y, W); else { invcore5(n, s, x, y, W); invfft(n/5, 5*s, !eo, y, x, W); } } else if (n%3 == 0) { if (n == 3) invend3(s, eo, x, y, W); else { invcore3(n, s, x, y, W); invfft(n/3, 3*s, !eo, y, x, W); } } else invfftany(7, n, s, eo, x, y, W); } /////////////////////////////////////////////////////////////////////////////// // Forward Butterfly Operation with OpenMP /////////////////////////////////////////////////////////////////////////////// void fwdend2p(const int s, const bool eo, complex_vector x, complex_vector y) noexcept { complex_vector z = eo ? 
// (continuation of fwdend2p: result goes to y when eo, else in place to x)
y : x; if (s >= 2) {
// s >= 2: two adjacent columns per iteration via 256-bit (ymm) loads
#pragma omp for schedule(static) nowait
for (int q = 0; q < s; q += 2) { complex_vector xq = x + q; complex_vector zq = z + q; const ymm a = getpz2(xq+0); const ymm b = getpz2(xq+s); setpz2(zq+0, addpz2(a, b)); setpz2(zq+s, subpz2(a, b)); } } else {
// s == 1: a single scalar butterfly; one thread does it
#pragma omp single
{ const xmm a = getpz(x[0]); const xmm b = getpz(x[1]); setpz(z[0], addpz(a, b)); setpz(z[1], subpz(a, b)); } } }
// fwdcore2p: radix-2 forward butterfly core, OpenMP worksharing variant.
// Flattens the (p,q) loop nest into one index i so the static schedule
// balances work; the s >= 2 path uses ymm pairs with the twiddle W[sp]
// broadcast by duppz3, the s == 1 path is scalar.
void fwdcore2p(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int m = n/2; const int N = n*s; const int N0 = 0; const int N1 = N/2; if (s >= 2) {
#pragma omp for schedule(static)
for (int i = 0; i < N/4; i++) { const int p = i / (s/2); const int q = i % (s/2) * 2; const int sp = s*p; const int s2p = 2*sp; const ymm wp = duppz3(W[sp]); complex_vector xq_sp = x + q + sp; complex_vector yq_s2p = y + q + s2p; const ymm a = getpz2(xq_sp+N0); const ymm b = getpz2(xq_sp+N1); setpz2(yq_s2p+s*0, addpz2(a, b)); setpz2(yq_s2p+s*1, mulpz2(wp, subpz2(a, b))); } } else {
#pragma omp for schedule(static)
for (int p = 0; p < m; p++) { complex_vector x_p = x + p; complex_vector y_2p = y + 2*p; const xmm wp = getpz(W[p]); const xmm a = getpz(x_p[N0]); const xmm b = getpz(x_p[N1]); setpz(y_2p[0], addpz(a, b)); setpz(y_2p[1], mulpz(wp, subpz(a, b))); } } }
///////////////////////////////////////////////////////////////////////////////
// fwdend4p: final radix-4 forward stage, OpenMP variant (orphaned `omp for`).
void fwdend4p(const int s, const bool eo, complex_vector x, complex_vector y) noexcept { complex_vector z = eo ?
// (fwdend4p continues below)
y : x; if (s >= 2) {
#pragma omp for schedule(static) nowait
for (int q = 0; q < s; q += 2) { complex_vector xq = x + q; complex_vector zq = z + q; const ymm a = getpz2(xq+s*0); const ymm b = getpz2(xq+s*1); const ymm c = getpz2(xq+s*2); const ymm d = getpz2(xq+s*3); const ymm apc = addpz2(a, c); const ymm amc = subpz2(a, c); const ymm bpd = addpz2(b, d); const ymm jbmd = jxpz2(subpz2(b, d)); setpz2(zq+s*0, addpz2(apc, bpd)); setpz2(zq+s*1, subpz2(amc, jbmd)); setpz2(zq+s*2, subpz2(apc, bpd)); setpz2(zq+s*3, addpz2(amc, jbmd)); } } else {
#pragma omp single
{ const xmm a = getpz(x[0]); const xmm b = getpz(x[1]); const xmm c = getpz(x[2]); const xmm d = getpz(x[3]); const xmm apc = addpz(a, c); const xmm amc = subpz(a, c); const xmm bpd = addpz(b, d); const xmm jbmd = jxpz(subpz(b, d)); setpz(z[0], addpz(apc, bpd)); setpz(z[1], subpz(amc, jbmd)); setpz(z[2], subpz(apc, bpd)); setpz(z[3], addpz(amc, jbmd)); } } }
// fwdcore4p: radix-4 forward butterfly core, OpenMP worksharing variant.
// Twiddles w1p/w2p/w3p are read directly from W at 1*sp, 2*sp, 3*sp.
void fwdcore4p(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int m = n/4; const int N = n*s; const int N0 = 0; const int N1 = N/4; const int N2 = N1*2; const int N3 = N1*3; if (s >= 2) {
#pragma omp for schedule(static)
for (int i = 0; i < N/8; i++) { const int p = i / (s/2); const int q = i % (s/2) * 2; const int sp = s*p; const int s4p = 4*sp; const ymm w1p = duppz3(W[1*sp]); const ymm w2p = duppz3(W[2*sp]); const ymm w3p = duppz3(W[3*sp]); complex_vector xq_sp = x + q + sp; complex_vector yq_s4p = y + q + s4p; const ymm a = getpz2(xq_sp+N0); const ymm b = getpz2(xq_sp+N1); const ymm c = getpz2(xq_sp+N2); const ymm d = getpz2(xq_sp+N3); const ymm apc = addpz2(a, c); const ymm amc = subpz2(a, c); const ymm bpd = addpz2(b, d); const ymm jbmd = jxpz2(subpz2(b, d)); setpz2(yq_s4p+s*0, addpz2(apc, bpd)); setpz2(yq_s4p+s*1, mulpz2(w1p, subpz2(amc, jbmd))); setpz2(yq_s4p+s*2, mulpz2(w2p, subpz2(apc, bpd))); setpz2(yq_s4p+s*3, mulpz2(w3p, addpz2(amc, jbmd))); } } else {
#pragma omp for schedule(static)
for
// (continuation of fwdcore4p: scalar s == 1 path; twiddles built from W[p])
(int p = 0; p < m; p++) { complex_vector x_p = x + p; complex_vector y_4p = y + 4*p; const xmm w1p = getpz(W[p]); const xmm w2p = mulpz(w1p,w1p); const xmm w3p = mulpz(w1p,w2p); const xmm a = getpz(x_p[N0]); const xmm b = getpz(x_p[N1]); const xmm c = getpz(x_p[N2]); const xmm d = getpz(x_p[N3]); const xmm apc = addpz(a, c); const xmm amc = subpz(a, c); const xmm bpd = addpz(b, d); const xmm jbmd = jxpz(subpz(b, d)); setpz(y_4p[0], addpz(apc, bpd)); setpz(y_4p[1], mulpz(w1p, subpz(amc, jbmd))); setpz(y_4p[2], mulpz(w2p, subpz(apc, bpd))); setpz(y_4p[3], mulpz(w3p, addpz(amc, jbmd))); } } }
///////////////////////////////////////////////////////////////////////////////
// fwdend8p: final radix-8 forward stage, OpenMP variant. The intermediate
// names encode the split-radix combinations (e.g. s04_mj_s26 = (x0-x4) minus
// j*(x2-x6)); w8xpz/v8xpz presumably apply the +/- 8th roots of unity —
// confirm against the SIMD helper header.
void fwdend8p(const int s, const bool eo, complex_vector x, complex_vector y) noexcept { complex_vector z = eo ? y : x; if (s >= 2) {
#pragma omp for schedule(static) nowait
for (int q = 0; q < s; q += 2) { complex_vector xq = x + q; complex_vector zq = z + q; const ymm x0 = getpz2(xq+s*0); const ymm x1 = getpz2(xq+s*1); const ymm x2 = getpz2(xq+s*2); const ymm x3 = getpz2(xq+s*3); const ymm x4 = getpz2(xq+s*4); const ymm x5 = getpz2(xq+s*5); const ymm x6 = getpz2(xq+s*6); const ymm x7 = getpz2(xq+s*7); const ymm a04 = addpz2(x0, x4); const ymm s04 = subpz2(x0, x4); const ymm a26 = addpz2(x2, x6); const ymm js26 = jxpz2(subpz2(x2, x6)); const ymm a15 = addpz2(x1, x5); const ymm s15 = subpz2(x1, x5); const ymm a37 = addpz2(x3, x7); const ymm js37 = jxpz2(subpz2(x3, x7)); const ymm a04_p1_a26 = addpz2(a04, a26); const ymm s04_mj_s26 = subpz2(s04, js26); const ymm a04_m1_a26 = subpz2(a04, a26); const ymm s04_pj_s26 = addpz2(s04, js26); const ymm a15_p1_a37 = addpz2(a15, a37); const ymm w8_s15_mj_s37 = w8xpz2(subpz2(s15, js37)); const ymm j_a15_m1_a37 = jxpz2(subpz2(a15, a37)); const ymm v8_s15_pj_s37 = v8xpz2(addpz2(s15, js37)); setpz2(zq+s*0, addpz2(a04_p1_a26, a15_p1_a37)); setpz2(zq+s*1, addpz2(s04_mj_s26, w8_s15_mj_s37)); setpz2(zq+s*2, subpz2(a04_m1_a26, j_a15_m1_a37)); setpz2(zq+s*3, subpz2(s04_pj_s26, v8_s15_pj_s37)); setpz2(zq+s*4, subpz2(a04_p1_a26, a15_p1_a37)); setpz2(zq+s*5, subpz2(s04_mj_s26, w8_s15_mj_s37)); setpz2(zq+s*6, addpz2(a04_m1_a26, j_a15_m1_a37)); setpz2(zq+s*7, addpz2(s04_pj_s26, v8_s15_pj_s37)); } } else {
#pragma omp single
{ const xmm x0 = getpz(x[0]); const xmm x1 = getpz(x[1]); const xmm x2 = getpz(x[2]); const xmm x3 = getpz(x[3]); const xmm x4 = getpz(x[4]); const xmm x5 = getpz(x[5]); const xmm x6 = getpz(x[6]); const xmm x7 = getpz(x[7]); const xmm a04 = addpz(x0, x4); const xmm s04 = subpz(x0, x4); const xmm a26 = addpz(x2, x6); const xmm js26 = jxpz(subpz(x2, x6)); const xmm a15 = addpz(x1, x5); const xmm s15 = subpz(x1, x5); const xmm a37 = addpz(x3, x7); const xmm js37 = jxpz(subpz(x3, x7)); const xmm a04_p1_a26 = addpz(a04, a26); const xmm s04_mj_s26 = subpz(s04, js26); const xmm a04_m1_a26 = subpz(a04, a26); const xmm s04_pj_s26 = addpz(s04, js26); const xmm a15_p1_a37 = addpz(a15, a37); const xmm w8_s15_mj_s37 = w8xpz(subpz(s15, js37)); const xmm j_a15_m1_a37 = jxpz(subpz(a15, a37)); const xmm v8_s15_pj_s37 = v8xpz(addpz(s15, js37)); setpz(z[0], addpz(a04_p1_a26, a15_p1_a37)); setpz(z[1], addpz(s04_mj_s26, w8_s15_mj_s37)); setpz(z[2], subpz(a04_m1_a26, j_a15_m1_a37)); setpz(z[3], subpz(s04_pj_s26, v8_s15_pj_s37)); setpz(z[4], subpz(a04_p1_a26, a15_p1_a37)); setpz(z[5], subpz(s04_mj_s26, w8_s15_mj_s37)); setpz(z[6], addpz(a04_m1_a26, j_a15_m1_a37)); setpz(z[7], addpz(s04_pj_s26, v8_s15_pj_s37)); } } }
// fwdcore8p: radix-8 forward butterfly core, OpenMP worksharing variant.
// w1p..w3p come from the table; w4p..w7p are derived by multiplication.
void fwdcore8p(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int m = n/8; const int N = n*s; const int N0 = 0; const int N1 = N/8; const int N2 = N1*2; const int N3 = N1*3; const int N4 = N1*4; const int N5 = N1*5; const int N6 = N1*6; const int N7 = N1*7; if (s >= 2) {
#pragma omp for schedule(static)
for (int i = 0; i < N/16; i++) { const int p = i / (s/2); const int q = i % (s/2) * 2; const int sp = s*p; const int s8p = 8*sp; const ymm w1p =
// (continuation of fwdcore8p: vector path twiddle setup and butterfly)
duppz3(W[1*sp]); const ymm w2p = duppz3(W[2*sp]); const ymm w3p = duppz3(W[3*sp]); const ymm w4p = mulpz2(w2p,w2p); const ymm w5p = mulpz2(w2p,w3p); const ymm w6p = mulpz2(w3p,w3p); const ymm w7p = mulpz2(w3p,w4p); complex_vector xq_sp = x + q + sp; complex_vector yq_s8p = y + q + s8p; const ymm x0 = getpz2(xq_sp+N0); const ymm x1 = getpz2(xq_sp+N1); const ymm x2 = getpz2(xq_sp+N2); const ymm x3 = getpz2(xq_sp+N3); const ymm x4 = getpz2(xq_sp+N4); const ymm x5 = getpz2(xq_sp+N5); const ymm x6 = getpz2(xq_sp+N6); const ymm x7 = getpz2(xq_sp+N7); const ymm a04 = addpz2(x0, x4); const ymm s04 = subpz2(x0, x4); const ymm a26 = addpz2(x2, x6); const ymm js26 = jxpz2(subpz2(x2, x6)); const ymm a15 = addpz2(x1, x5); const ymm s15 = subpz2(x1, x5); const ymm a37 = addpz2(x3, x7); const ymm js37 = jxpz2(subpz2(x3, x7)); const ymm a04_p1_a26 = addpz2(a04, a26); const ymm s04_mj_s26 = subpz2(s04, js26); const ymm a04_m1_a26 = subpz2(a04, a26); const ymm s04_pj_s26 = addpz2(s04, js26); const ymm a15_p1_a37 = addpz2(a15, a37); const ymm w8_s15_mj_s37 = w8xpz2(subpz2(s15, js37)); const ymm j_a15_m1_a37 = jxpz2(subpz2(a15, a37)); const ymm v8_s15_pj_s37 = v8xpz2(addpz2(s15, js37)); setpz2(yq_s8p+s*0, addpz2(a04_p1_a26, a15_p1_a37)); setpz2(yq_s8p+s*1, mulpz2(w1p, addpz2(s04_mj_s26, w8_s15_mj_s37))); setpz2(yq_s8p+s*2, mulpz2(w2p, subpz2(a04_m1_a26, j_a15_m1_a37))); setpz2(yq_s8p+s*3, mulpz2(w3p, subpz2(s04_pj_s26, v8_s15_pj_s37))); setpz2(yq_s8p+s*4, mulpz2(w4p, subpz2(a04_p1_a26, a15_p1_a37))); setpz2(yq_s8p+s*5, mulpz2(w5p, subpz2(s04_mj_s26, w8_s15_mj_s37))); setpz2(yq_s8p+s*6, mulpz2(w6p, addpz2(a04_m1_a26, j_a15_m1_a37))); setpz2(yq_s8p+s*7, mulpz2(w7p, addpz2(s04_pj_s26, v8_s15_pj_s37))); } } else {
// s == 1: scalar path; all twiddles derived from W[p]
#pragma omp for schedule(static)
for (int p = 0; p < m; p++) { complex_vector x_p = x + p; complex_vector y_8p = y + 8*p; const xmm w1p = getpz(W[p]); const xmm w2p = mulpz(w1p,w1p); const xmm w3p = mulpz(w1p,w2p); const xmm w4p = mulpz(w2p,w2p); const xmm w5p = mulpz(w2p,w3p); const xmm w6p = mulpz(w3p,w3p); const xmm w7p = mulpz(w3p,w4p); const xmm x0 = getpz(x_p[N0]); const xmm x1 = getpz(x_p[N1]); const xmm x2 = getpz(x_p[N2]); const xmm x3 = getpz(x_p[N3]); const xmm x4 = getpz(x_p[N4]); const xmm x5 = getpz(x_p[N5]); const xmm x6 = getpz(x_p[N6]); const xmm x7 = getpz(x_p[N7]); const xmm a04 = addpz(x0, x4); const xmm s04 = subpz(x0, x4); const xmm a26 = addpz(x2, x6); const xmm js26 = jxpz(subpz(x2, x6)); const xmm a15 = addpz(x1, x5); const xmm s15 = subpz(x1, x5); const xmm a37 = addpz(x3, x7); const xmm js37 = jxpz(subpz(x3, x7)); const xmm a04_p1_a26 = addpz(a04, a26); const xmm s04_mj_s26 = subpz(s04, js26); const xmm a04_m1_a26 = subpz(a04, a26); const xmm s04_pj_s26 = addpz(s04, js26); const xmm a15_p1_a37 = addpz(a15, a37); const xmm w8_s15_mj_s37 = w8xpz(subpz(s15, js37)); const xmm j_a15_m1_a37 = jxpz(subpz(a15, a37)); const xmm v8_s15_pj_s37 = v8xpz(addpz(s15, js37)); setpz(y_8p[0], addpz(a04_p1_a26, a15_p1_a37)); setpz(y_8p[1], mulpz(w1p, addpz(s04_mj_s26, w8_s15_mj_s37))); setpz(y_8p[2], mulpz(w2p, subpz(a04_m1_a26, j_a15_m1_a37))); setpz(y_8p[3], mulpz(w3p, subpz(s04_pj_s26, v8_s15_pj_s37))); setpz(y_8p[4], mulpz(w4p, subpz(a04_p1_a26, a15_p1_a37))); setpz(y_8p[5], mulpz(w5p, subpz(s04_mj_s26, w8_s15_mj_s37))); setpz(y_8p[6], mulpz(w6p, addpz(a04_m1_a26, j_a15_m1_a37))); setpz(y_8p[7], mulpz(w7p, addpz(s04_pj_s26, v8_s15_pj_s37))); } } }
///////////////////////////////////////////////////////////////////////////////
// fwdend5p: final radix-5 forward stage, OpenMP variant. w1..w4 are the
// primitive 5th-root twiddles read from W[s] (forward direction).
void fwdend5p(const int s, const bool eo, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const cpx w1 = getpz(W[s]); const cpx w2 = mulpz(w1,w1); const cpx w3 = mulpz(w1,w2); const cpx w4 = mulpz(w2,w2); complex_vector z = eo ?
// (continuation of fwdend5p: 5-point DFT written to z = eo ? y : x)
y : x;
#pragma omp for schedule(static) nowait
for (int q = 0; q < s; q++) { const cpx a = getpz(x[q+s*0]); const cpx b = getpz(x[q+s*1]); const cpx c = getpz(x[q+s*2]); const cpx d = getpz(x[q+s*3]); const cpx e = getpz(x[q+s*4]); setpz(z[q+s*0], a + b + c + d + e); setpz(z[q+s*1], a + w1*b + w2*c + w3*d + w4*e); setpz(z[q+s*2], a + w2*b + w4*c + w1*d + w3*e); setpz(z[q+s*3], a + w3*b + w1*c + w4*d + w2*e); setpz(z[q+s*4], a + w4*b + w3*c + w2*d + w1*e); } }
// fwdcore5p: radix-5 forward butterfly core, OpenMP worksharing variant.
// Flattened (p,q) loop; per-p twiddle w1p read from W[sp] (forward).
void fwdcore5p(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int N = n*s; const int ms = N/5; const int N0 = 0; const int N1 = N/5; const int N2 = N1*2; const int N3 = N1*3; const int N4 = N1*4; const cpx w1 = getpz(W[N1]); const cpx w2 = mulpz(w1,w1); const cpx w3 = mulpz(w1,w2); const cpx w4 = mulpz(w2,w2);
#pragma omp for schedule(static)
for (int i = 0; i < ms; i++) { const int p = i / s; const int q = i % s; const int sp = s*p; const cpx w1p = getpz(W[sp]); const cpx w2p = mulpz(w1p,w1p); const cpx w3p = mulpz(w1p,w2p); const cpx w4p = mulpz(w2p,w2p); const int q_sp = q + sp; const cpx a = getpz(x[q_sp+N0]); const cpx b = getpz(x[q_sp+N1]); const cpx c = getpz(x[q_sp+N2]); const cpx d = getpz(x[q_sp+N3]); const cpx e = getpz(x[q_sp+N4]); const int q_s5p = q + sp*5; setpz(y[q_s5p+s*0], a + b + c + d + e); setpz(y[q_s5p+s*1], (a + w1*b + w2*c + w3*d + w4*e)*w1p); setpz(y[q_s5p+s*2], (a + w2*b + w4*c + w1*d + w3*e)*w2p); setpz(y[q_s5p+s*3], (a + w3*b + w1*c + w4*d + w2*e)*w3p); setpz(y[q_s5p+s*4], (a + w4*b + w3*c + w2*d + w1*e)*w4p); } }
///////////////////////////////////////////////////////////////////////////////
// fwdend3p: final radix-3 forward stage, OpenMP variant.
void fwdend3p(const int s, const bool eo, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const cpx w1 = getpz(W[s]); const cpx w2 = mulpz(w1,w1); complex_vector z = eo ? y : x;
#pragma omp for schedule(static) nowait
for (int q = 0; q < s; q++) { const cpx a = getpz(x[q+s*0]); const cpx b = getpz(x[q+s*1]); const cpx c = getpz(x[q+s*2]); setpz(z[q+s*0], a + b + c); setpz(z[q+s*1], a + w1*b + w2*c); setpz(z[q+s*2], a + w2*b + w1*c); } }
// fwdcore3p: radix-3 forward butterfly core, OpenMP worksharing variant.
void fwdcore3p(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int N = n*s; const int ms = N/3; const int N0 = 0; const int N1 = N/3; const int N2 = N1*2; const cpx w1 = getpz(W[N1]); const cpx w2 = mulpz(w1,w1);
#pragma omp for schedule(static)
for (int i = 0; i < ms; i++) { const int p = i / s; const int q = i % s; const int sp = s*p; const cpx w1p = getpz(W[sp]); const cpx w2p = mulpz(w1p,w1p); const int q_sp = q + sp; const cpx a = getpz(x[q_sp+N0]); const cpx b = getpz(x[q_sp+N1]); const cpx c = getpz(x[q_sp+N2]); const int q_s3p = q + sp*3; setpz(y[q_s3p+s*0], a + b + c); setpz(y[q_s3p+s*1], (a + w1*b + w2*c)*w1p); setpz(y[q_s3p+s*2], (a + w2*b + w1*c)*w2p); } }
///////////////////////////////////////////////////////////////////////////////
// Any Size FFT except Radix-2,3,5 with OpenMP
///////////////////////////////////////////////////////////////////////////////
// fwdfftanyp: OpenMP counterpart of the serial any-radix forward step:
// same factor search (smallest odd k >= r, or direct DFT when n is prime),
// with the p/q loops fused into a worksharing loop over h.
void fwdfftanyp(const int r, const int n, const int s, const bool eo, complex_vector x, complex_vector y, const_complex_vector W) noexcept { static const xmm zero = { 0, 0 }; const int N = n*s; int k = r; while (n%k != 0) { if (k*k > n) { k = n; break; } k += 2; } if (k == n) {
#pragma omp for schedule(static)
for (int q = 0; q < s; q++) { for (int i = 0; i < k; i++) { cpx z = zero; for (int j = 0; j < k; j++) { const cpx a = getpz(x[q+s*j]); const cpx wij = getpz(W[s*((i*j)%k)]); z = z + a*wij; } setpz(y[q+s*i], z); } } if (!eo) {
#pragma omp for schedule(static) nowait
for (int p = 0; p < N; p++) setpz(x[p], getpz(y[p])); } } else { const int m = n/k; const int ms = m*s;
#pragma omp for schedule(static)
for (int h = 0; h < ms; h++) { const int p = h / s; const int q = h % s;
// (continuation of fwdfftanyp: one radix-k pass, then recurse with buffers swapped)
const int sp = s*p; const int q_sp = q + sp; const int q_spk = q + sp*k; for (int i = 0; i < k; i++) { cpx z = zero; for (int j = 0; j < k; j++) { const cpx a = getpz(x[q_sp+ms*j]); const cpx wij = getpz(W[ms*((i*j)%k)]); z = z + a*wij; } const cpx wip = getpz(W[i*sp]); setpz(y[q_spk+s*i], z * wip); } } fwdfftanyp(k, m, k*s, !eo, y, x, W); } }
///////////////////////////////////////////////////////////////////////////////
// Mixed Radix FFT with OpenMP
///////////////////////////////////////////////////////////////////////////////
// fwdfftp: recursive mixed-radix forward FFT driver, OpenMP variant.
// Identical factor-peeling order to the serial invfft/fwdfft drivers:
// 8, 4, 2, 5, 3, then fwdfftanyp for everything else.
void fwdfftp(const int n, const int s, const bool eo, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int N = n*s; if (N < 2) return; if (n%8 == 0) { if (n == 8) fwdend8p(s, eo, x, y); else { fwdcore8p(n, s, x, y, W); fwdfftp(n/8, 8*s, !eo, y, x, W); } } else if (n%4 == 0) { if (n == 4) fwdend4p(s, eo, x, y); else { fwdcore4p(n, s, x, y, W); fwdfftp(n/4, 4*s, !eo, y, x, W); } } else if (n%2 == 0) { if (n == 2) fwdend2p(s, eo, x, y); else { fwdcore2p(n, s, x, y, W); fwdfftp(n/2, 2*s, !eo, y, x, W); } } else if (n%5 == 0) { if (n == 5) fwdend5p(s, eo, x, y, W); else { fwdcore5p(n, s, x, y, W); fwdfftp(n/5, 5*s, !eo, y, x, W); } } else if (n%3 == 0) { if (n == 3) fwdend3p(s, eo, x, y, W); else { fwdcore3p(n, s, x, y, W); fwdfftp(n/3, 3*s, !eo, y, x, W); } } else fwdfftanyp(7, n, s, eo, x, y, W); }
///////////////////////////////////////////////////////////////////////////////
// Inverse Butterfly Operation with OpenMP
///////////////////////////////////////////////////////////////////////////////
// invend2p: final radix-2 inverse stage, OpenMP variant (radix-2 butterfly
// is its own inverse, so it mirrors fwdend2p).
void invend2p(const int s, const bool eo, complex_vector x, complex_vector y) noexcept { complex_vector z = eo ?
// (continuation of invend2p)
y : x; if (s >= 2) {
#pragma omp for schedule(static) nowait
for (int q = 0; q < s; q += 2) { complex_vector xq = x + q; complex_vector zq = z + q; const ymm a = getpz2(xq+0); const ymm b = getpz2(xq+s); setpz2(zq+0, addpz2(a, b)); setpz2(zq+s, subpz2(a, b)); } } else {
#pragma omp single
{ const xmm a = getpz(x[0]); const xmm b = getpz(x[1]); setpz(z[0], addpz(a, b)); setpz(z[1], subpz(a, b)); } } }
// invcore2p: radix-2 inverse butterfly core, OpenMP worksharing variant.
// Differs from fwdcore2p only in the twiddle index: W[N-sp] / W[N-p]
// (the conjugate/inverse direction through the table).
void invcore2p(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int m = n/2; const int N = n*s; const int N0 = 0; const int N1 = N/2; if (s >= 2) {
#pragma omp for schedule(static)
for (int i = 0; i < N/4; i++) { const int p = i / (s/2); const int q = i % (s/2) * 2; const int sp = s*p; const int s2p = 2*sp; const ymm wp = duppz3(W[N-sp]); complex_vector xq_sp = x + q + sp; complex_vector yq_s2p = y + q + s2p; const ymm a = getpz2(xq_sp+N0); const ymm b = getpz2(xq_sp+N1); setpz2(yq_s2p+s*0, addpz2(a, b)); setpz2(yq_s2p+s*1, mulpz2(wp, subpz2(a, b))); } } else {
#pragma omp for schedule(static)
for (int p = 0; p < m; p++) { complex_vector x_p = x + p; complex_vector y_2p = y + 2*p; const xmm wp = getpz(W[N-p]); const xmm a = getpz(x_p[N0]); const xmm b = getpz(x_p[N1]); setpz(y_2p[0], addpz(a, b)); setpz(y_2p[1], mulpz(wp, subpz(a, b))); } } }
///////////////////////////////////////////////////////////////////////////////
// invend4p: final radix-4 inverse stage, OpenMP variant. The +/- pattern on
// jbmd is the mirror of fwdend4p (j replaced by -j).
void invend4p(const int s, const bool eo, complex_vector x, complex_vector y) noexcept { complex_vector z = eo ? y : x; if (s >= 2) {
#pragma omp for schedule(static) nowait
for (int q = 0; q < s; q += 2) { complex_vector xq = x + q; complex_vector zq = z + q; const ymm a = getpz2(xq+s*0); const ymm b = getpz2(xq+s*1); const ymm c = getpz2(xq+s*2); const ymm d = getpz2(xq+s*3); const ymm apc = addpz2(a, c); const ymm amc = subpz2(a, c); const ymm bpd = addpz2(b, d); const ymm jbmd = jxpz2(subpz2(b, d)); setpz2(zq+s*0, addpz2(apc, bpd)); setpz2(zq+s*1, addpz2(amc, jbmd)); setpz2(zq+s*2, subpz2(apc, bpd)); setpz2(zq+s*3, subpz2(amc, jbmd)); } } else {
#pragma omp single
{ const xmm a = getpz(x[0]); const xmm b = getpz(x[1]); const xmm c = getpz(x[2]); const xmm d = getpz(x[3]); const xmm apc = addpz(a, c); const xmm amc = subpz(a, c); const xmm bpd = addpz(b, d); const xmm jbmd = jxpz(subpz(b, d)); setpz(z[0], addpz(apc, bpd)); setpz(z[1], addpz(amc, jbmd)); setpz(z[2], subpz(apc, bpd)); setpz(z[3], subpz(amc, jbmd)); } } }
// invcore4p: radix-4 inverse butterfly core, OpenMP worksharing variant.
// Vector path reads twiddles from W[N-k*sp]; scalar path conjugates W[p].
void invcore4p(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int m = n/4; const int N = n*s; const int N0 = 0; const int N1 = N/4; const int N2 = N1*2; const int N3 = N1*3; if (s >= 2) {
#pragma omp for schedule(static)
for (int i = 0; i < N/8; i++) { const int p = i / (s/2); const int q = i % (s/2) * 2; const int sp = s*p; const int s4p = 4*sp; const ymm w1p = duppz3(W[N-1*sp]); const ymm w2p = duppz3(W[N-2*sp]); const ymm w3p = duppz3(W[N-3*sp]); complex_vector xq_sp = x + q + sp; complex_vector yq_s4p = y + q + s4p; const ymm a = getpz2(xq_sp+N0); const ymm b = getpz2(xq_sp+N1); const ymm c = getpz2(xq_sp+N2); const ymm d = getpz2(xq_sp+N3); const ymm apc = addpz2(a, c); const ymm amc = subpz2(a, c); const ymm bpd = addpz2(b, d); const ymm jbmd = jxpz2(subpz2(b, d)); setpz2(yq_s4p+s*0, addpz2(apc, bpd)); setpz2(yq_s4p+s*1, mulpz2(w1p, addpz2(amc, jbmd))); setpz2(yq_s4p+s*2, mulpz2(w2p, subpz2(apc, bpd))); setpz2(yq_s4p+s*3, mulpz2(w3p, subpz2(amc, jbmd))); } } else {
#pragma omp for schedule(static)
// (continuation of invcore4p: scalar path; w1p is the conjugate of W[p])
for (int p = 0; p < m; p++) { complex_vector x_p = x + p; complex_vector y_4p = y + 4*p; const xmm w1p = cnjpz(getpz(W[p])); const xmm w2p = mulpz(w1p,w1p); const xmm w3p = mulpz(w1p,w2p); const xmm a = getpz(x_p[N0]); const xmm b = getpz(x_p[N1]); const xmm c = getpz(x_p[N2]); const xmm d = getpz(x_p[N3]); const xmm apc = addpz(a, c); const xmm amc = subpz(a, c); const xmm bpd = addpz(b, d); const xmm jbmd = jxpz(subpz(b, d)); setpz(y_4p[0], addpz(apc, bpd)); setpz(y_4p[1], mulpz(w1p, addpz(amc, jbmd))); setpz(y_4p[2], mulpz(w2p, subpz(apc, bpd))); setpz(y_4p[3], mulpz(w3p, subpz(amc, jbmd))); } } }
///////////////////////////////////////////////////////////////////////////////
// invend8p: final radix-8 inverse stage, OpenMP variant; the output sign
// pattern is the mirror of fwdend8p (pj/mj terms swap roles).
void invend8p(const int s, const bool eo, complex_vector x, complex_vector y) noexcept { complex_vector z = eo ? y : x; if (s >= 2) {
#pragma omp for schedule(static) nowait
for (int q = 0; q < s; q += 2) { complex_vector xq = x + q; complex_vector zq = z + q; const ymm x0 = getpz2(xq+s*0); const ymm x1 = getpz2(xq+s*1); const ymm x2 = getpz2(xq+s*2); const ymm x3 = getpz2(xq+s*3); const ymm x4 = getpz2(xq+s*4); const ymm x5 = getpz2(xq+s*5); const ymm x6 = getpz2(xq+s*6); const ymm x7 = getpz2(xq+s*7); const ymm a04 = addpz2(x0, x4); const ymm s04 = subpz2(x0, x4); const ymm a26 = addpz2(x2, x6); const ymm js26 = jxpz2(subpz2(x2, x6)); const ymm a15 = addpz2(x1, x5); const ymm s15 = subpz2(x1, x5); const ymm a37 = addpz2(x3, x7); const ymm js37 = jxpz2(subpz2(x3, x7)); const ymm a04_p1_a26 = addpz2(a04, a26); const ymm s04_pj_s26 = addpz2(s04, js26); const ymm a04_m1_a26 = subpz2(a04, a26); const ymm s04_mj_s26 = subpz2(s04, js26); const ymm a15_p1_a37 = addpz2(a15, a37); const ymm v8_s15_pj_s37 = v8xpz2(addpz2(s15, js37)); const ymm j_a15_m1_a37 = jxpz2(subpz2(a15, a37)); const ymm w8_s15_mj_s37 = w8xpz2(subpz2(s15, js37)); setpz2(zq+s*0, addpz2(a04_p1_a26, a15_p1_a37)); setpz2(zq+s*1, addpz2(s04_pj_s26, v8_s15_pj_s37)); setpz2(zq+s*2, addpz2(a04_m1_a26, j_a15_m1_a37)); setpz2(zq+s*3, subpz2(s04_mj_s26, w8_s15_mj_s37)); setpz2(zq+s*4, subpz2(a04_p1_a26, a15_p1_a37)); setpz2(zq+s*5, subpz2(s04_pj_s26, v8_s15_pj_s37)); setpz2(zq+s*6, subpz2(a04_m1_a26, j_a15_m1_a37)); setpz2(zq+s*7, addpz2(s04_mj_s26, w8_s15_mj_s37)); } } else {
#pragma omp single
{ const xmm x0 = getpz(x[0]); const xmm x1 = getpz(x[1]); const xmm x2 = getpz(x[2]); const xmm x3 = getpz(x[3]); const xmm x4 = getpz(x[4]); const xmm x5 = getpz(x[5]); const xmm x6 = getpz(x[6]); const xmm x7 = getpz(x[7]); const xmm a04 = addpz(x0, x4); const xmm s04 = subpz(x0, x4); const xmm a26 = addpz(x2, x6); const xmm js26 = jxpz(subpz(x2, x6)); const xmm a15 = addpz(x1, x5); const xmm s15 = subpz(x1, x5); const xmm a37 = addpz(x3, x7); const xmm js37 = jxpz(subpz(x3, x7)); const xmm a04_p1_a26 = addpz(a04, a26); const xmm s04_pj_s26 = addpz(s04, js26); const xmm a04_m1_a26 = subpz(a04, a26); const xmm s04_mj_s26 = subpz(s04, js26); const xmm a15_p1_a37 = addpz(a15, a37); const xmm v8_s15_pj_s37 = v8xpz(addpz(s15, js37)); const xmm j_a15_m1_a37 = jxpz(subpz(a15, a37)); const xmm w8_s15_mj_s37 = w8xpz(subpz(s15, js37)); setpz(z[0], addpz(a04_p1_a26, a15_p1_a37)); setpz(z[1], addpz(s04_pj_s26, v8_s15_pj_s37)); setpz(z[2], addpz(a04_m1_a26, j_a15_m1_a37)); setpz(z[3], subpz(s04_mj_s26, w8_s15_mj_s37)); setpz(z[4], subpz(a04_p1_a26, a15_p1_a37)); setpz(z[5], subpz(s04_pj_s26, v8_s15_pj_s37)); setpz(z[6], subpz(a04_m1_a26, j_a15_m1_a37)); setpz(z[7], addpz(s04_mj_s26, w8_s15_mj_s37)); } } }
// invcore8p: radix-8 inverse butterfly core, OpenMP worksharing variant.
// Vector-path twiddles come from W[N-k*sp]; w4p..w7p derived by products.
void invcore8p(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int m = n/8; const int N = n*s; const int N0 = 0; const int N1 = N/8; const int N2 = N1*2; const int N3 = N1*3; const int N4 = N1*4; const int N5 = N1*5; const int N6 = N1*6; const int N7 = N1*7; if (s >= 2) {
#pragma omp for schedule(static)
for (int i = 0; i < N/16; i++) { const int p = i / (s/2); const int q = i % (s/2) * 2; const int sp = s*p; const int s8p = 8*sp; const ymm w1p =
// (continuation of invcore8p: vector path)
duppz3(W[N-1*sp]); const ymm w2p = duppz3(W[N-2*sp]); const ymm w3p = duppz3(W[N-3*sp]); const ymm w4p = mulpz2(w2p,w2p); const ymm w5p = mulpz2(w2p,w3p); const ymm w6p = mulpz2(w3p,w3p); const ymm w7p = mulpz2(w3p,w4p); complex_vector xq_sp = x + q + sp; complex_vector yq_s8p = y + q + s8p; const ymm x0 = getpz2(xq_sp+N0); const ymm x1 = getpz2(xq_sp+N1); const ymm x2 = getpz2(xq_sp+N2); const ymm x3 = getpz2(xq_sp+N3); const ymm x4 = getpz2(xq_sp+N4); const ymm x5 = getpz2(xq_sp+N5); const ymm x6 = getpz2(xq_sp+N6); const ymm x7 = getpz2(xq_sp+N7); const ymm a04 = addpz2(x0, x4); const ymm s04 = subpz2(x0, x4); const ymm a26 = addpz2(x2, x6); const ymm js26 = jxpz2(subpz2(x2, x6)); const ymm a15 = addpz2(x1, x5); const ymm s15 = subpz2(x1, x5); const ymm a37 = addpz2(x3, x7); const ymm js37 = jxpz2(subpz2(x3, x7)); const ymm a04_p1_a26 = addpz2(a04, a26); const ymm s04_pj_s26 = addpz2(s04, js26); const ymm a04_m1_a26 = subpz2(a04, a26); const ymm s04_mj_s26 = subpz2(s04, js26); const ymm a15_p1_a37 = addpz2(a15, a37); const ymm v8_s15_pj_s37 = v8xpz2(addpz2(s15, js37)); const ymm j_a15_m1_a37 = jxpz2(subpz2(a15, a37)); const ymm w8_s15_mj_s37 = w8xpz2(subpz2(s15, js37)); setpz2(yq_s8p+s*0, addpz2(a04_p1_a26, a15_p1_a37)); setpz2(yq_s8p+s*1, mulpz2(w1p, addpz2(s04_pj_s26, v8_s15_pj_s37))); setpz2(yq_s8p+s*2, mulpz2(w2p, addpz2(a04_m1_a26, j_a15_m1_a37))); setpz2(yq_s8p+s*3, mulpz2(w3p, subpz2(s04_mj_s26, w8_s15_mj_s37))); setpz2(yq_s8p+s*4, mulpz2(w4p, subpz2(a04_p1_a26, a15_p1_a37))); setpz2(yq_s8p+s*5, mulpz2(w5p, subpz2(s04_pj_s26, v8_s15_pj_s37))); setpz2(yq_s8p+s*6, mulpz2(w6p, subpz2(a04_m1_a26, j_a15_m1_a37))); setpz2(yq_s8p+s*7, mulpz2(w7p, addpz2(s04_mj_s26, w8_s15_mj_s37))); } } else {
// s == 1: scalar path; twiddles built from the conjugate of W[p]
#pragma omp for schedule(static)
for (int p = 0; p < m; p++) { complex_vector x_p = x + p; complex_vector y_8p = y + 8*p; const xmm w1p = cnjpz(getpz(W[p])); const xmm w2p = mulpz(w1p,w1p); const xmm w3p = mulpz(w1p,w2p); const xmm w4p = mulpz(w2p,w2p); const xmm w5p = mulpz(w2p,w3p); const xmm w6p = mulpz(w3p,w3p); const xmm w7p = mulpz(w3p,w4p); const xmm x0 = getpz(x_p[N0]); const xmm x1 = getpz(x_p[N1]); const xmm x2 = getpz(x_p[N2]); const xmm x3 = getpz(x_p[N3]); const xmm x4 = getpz(x_p[N4]); const xmm x5 = getpz(x_p[N5]); const xmm x6 = getpz(x_p[N6]); const xmm x7 = getpz(x_p[N7]); const xmm a04 = addpz(x0, x4); const xmm s04 = subpz(x0, x4); const xmm a26 = addpz(x2, x6); const xmm js26 = jxpz(subpz(x2, x6)); const xmm a15 = addpz(x1, x5); const xmm s15 = subpz(x1, x5); const xmm a37 = addpz(x3, x7); const xmm js37 = jxpz(subpz(x3, x7)); const xmm a04_p1_a26 = addpz(a04, a26); const xmm s04_pj_s26 = addpz(s04, js26); const xmm a04_m1_a26 = subpz(a04, a26); const xmm s04_mj_s26 = subpz(s04, js26); const xmm a15_p1_a37 = addpz(a15, a37); const xmm v8_s15_pj_s37 = v8xpz(addpz(s15, js37)); const xmm j_a15_m1_a37 = jxpz(subpz(a15, a37)); const xmm w8_s15_mj_s37 = w8xpz(subpz(s15, js37)); setpz(y_8p[0], addpz(a04_p1_a26, a15_p1_a37)); setpz(y_8p[1], mulpz(w1p, addpz(s04_pj_s26, v8_s15_pj_s37))); setpz(y_8p[2], mulpz(w2p, addpz(a04_m1_a26, j_a15_m1_a37))); setpz(y_8p[3], mulpz(w3p, subpz(s04_mj_s26, w8_s15_mj_s37))); setpz(y_8p[4], mulpz(w4p, subpz(a04_p1_a26, a15_p1_a37))); setpz(y_8p[5], mulpz(w5p, subpz(s04_pj_s26, v8_s15_pj_s37))); setpz(y_8p[6], mulpz(w6p, subpz(a04_m1_a26, j_a15_m1_a37))); setpz(y_8p[7], mulpz(w7p, addpz(s04_mj_s26, w8_s15_mj_s37))); } } }
///////////////////////////////////////////////////////////////////////////////
// invend5p: final radix-5 inverse stage, OpenMP variant. The base twiddle is
// read from W[4*s] (the inverse-direction primitive 5th root).
void invend5p(const int s, const bool eo, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const cpx w1 = getpz(W[4*s]); const cpx w2 = mulpz(w1,w1); const cpx w3 = mulpz(w1,w2); const cpx w4 = mulpz(w2,w2); complex_vector z = eo ?
// (continuation of invend5p: 5-point inverse DFT written to z = eo ? y : x)
y : x;
#pragma omp for schedule(static) nowait
for (int q = 0; q < s; q++) { const cpx a = getpz(x[q+s*0]); const cpx b = getpz(x[q+s*1]); const cpx c = getpz(x[q+s*2]); const cpx d = getpz(x[q+s*3]); const cpx e = getpz(x[q+s*4]); setpz(z[q+s*0], a + b + c + d + e); setpz(z[q+s*1], a + w1*b + w2*c + w3*d + w4*e); setpz(z[q+s*2], a + w2*b + w4*c + w1*d + w3*e); setpz(z[q+s*3], a + w3*b + w1*c + w4*d + w2*e); setpz(z[q+s*4], a + w4*b + w3*c + w2*d + w1*e); } }
// invcore5p: radix-5 inverse butterfly core, OpenMP worksharing variant.
// Base twiddle from W[N4] and per-p twiddle from W[N-sp] (inverse direction).
void invcore5p(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int N = n*s; const int ms = N/5; const int N0 = 0; const int N1 = N/5; const int N2 = N1*2; const int N3 = N1*3; const int N4 = N1*4; const cpx w1 = getpz(W[N4]); const cpx w2 = mulpz(w1,w1); const cpx w3 = mulpz(w1,w2); const cpx w4 = mulpz(w2,w2);
#pragma omp for schedule(static)
for (int i = 0; i < ms; i++) { const int p = i / s; const int q = i % s; const int sp = s*p; const cpx w1p = getpz(W[N-sp]); const cpx w2p = mulpz(w1p,w1p); const cpx w3p = mulpz(w1p,w2p); const cpx w4p = mulpz(w2p,w2p); const int q_sp = q + sp; const cpx a = getpz(x[q_sp+N0]); const cpx b = getpz(x[q_sp+N1]); const cpx c = getpz(x[q_sp+N2]); const cpx d = getpz(x[q_sp+N3]); const cpx e = getpz(x[q_sp+N4]); const int q_s5p = q + sp*5; setpz(y[q_s5p+s*0], a + b + c + d + e); setpz(y[q_s5p+s*1], (a + w1*b + w2*c + w3*d + w4*e)*w1p); setpz(y[q_s5p+s*2], (a + w2*b + w4*c + w1*d + w3*e)*w2p); setpz(y[q_s5p+s*3], (a + w3*b + w1*c + w4*d + w2*e)*w3p); setpz(y[q_s5p+s*4], (a + w4*b + w3*c + w2*d + w1*e)*w4p); } }
///////////////////////////////////////////////////////////////////////////////
// invend3p: final radix-3 inverse stage, OpenMP variant (base twiddle W[2*s]).
void invend3p(const int s, const bool eo, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const cpx w1 = getpz(W[2*s]); const cpx w2 = mulpz(w1,w1); complex_vector z = eo ? y : x;
#pragma omp for schedule(static) nowait
for (int q = 0; q < s; q++) { const cpx a = getpz(x[q+s*0]); const cpx b = getpz(x[q+s*1]); const cpx c = getpz(x[q+s*2]); setpz(z[q+s*0], a + b + c); setpz(z[q+s*1], a + w1*b + w2*c); setpz(z[q+s*2], a + w2*b + w1*c); } }
// invcore3p: radix-3 inverse butterfly core, OpenMP worksharing variant.
void invcore3p(const int n, const int s, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int N = n*s; const int ms = N/3; const int N0 = 0; const int N1 = N/3; const int N2 = N1*2; const cpx w1 = getpz(W[N2]); const cpx w2 = mulpz(w1,w1);
#pragma omp for schedule(static)
for (int i = 0; i < ms; i++) { const int p = i / s; const int q = i % s; const int sp = s*p; const cpx w1p = getpz(W[N-sp]); const cpx w2p = mulpz(w1p,w1p); const int q_sp = q + sp; const cpx a = getpz(x[q_sp+N0]); const cpx b = getpz(x[q_sp+N1]); const cpx c = getpz(x[q_sp+N2]); const int q_s3p = q + sp*3; setpz(y[q_s3p+s*0], a + b + c); setpz(y[q_s3p+s*1], (a + w1*b + w2*c)*w1p); setpz(y[q_s3p+s*2], (a + w2*b + w1*c)*w2p); } }
///////////////////////////////////////////////////////////////////////////////
// Any Size IFFT except Radix-2,3,5 with OpenMP
///////////////////////////////////////////////////////////////////////////////
// invfftanyp: OpenMP counterpart of invfftany; same odd-factor search and
// prime fallback, with the (p,q) loops fused into one worksharing loop.
void invfftanyp(const int r, const int n, const int s, const bool eo, complex_vector x, complex_vector y, const_complex_vector W) noexcept { static const xmm zero = { 0, 0 }; const int N = n*s; int k = r; while (n%k != 0) { if (k*k > n) { k = n; break; } k += 2; } if (k == n) {
#pragma omp for schedule(static)
for (int q = 0; q < s; q++) { for (int i = 0; i < k; i++) { cpx z = zero; for (int j = 0; j < k; j++) { const cpx a = getpz(x[q+s*j]); const cpx wij = getpz(W[N-s*((i*j)%k)]); z = z + a*wij; } setpz(y[q+s*i], z); } } if (!eo) {
#pragma omp for schedule(static) nowait
for (int p = 0; p < N; p++) setpz(x[p], getpz(y[p])); } } else { const int m = n/k; const int ms = m*s;
#pragma omp for schedule(static)
for (int h = 0; h < ms; h++) { const int p = h / s; const int q = h %
// (continuation of invfftanyp: one radix-k pass, then recurse on m = n/k)
s; const int sp = s*p; const int q_sp = q + sp; const int q_spk = q + sp*k; for (int i = 0; i < k; i++) { cpx z = zero; for (int j = 0; j < k; j++) { const cpx a = getpz(x[q_sp+ms*j]); const cpx wij = getpz(W[N-ms*((i*j)%k)]); z = z + a*wij; } const cpx wip = getpz(W[N-i*sp]); setpz(y[q_spk+s*i], z * wip); } } invfftanyp(k, m, k*s, !eo, y, x, W); } }
///////////////////////////////////////////////////////////////////////////////
// Mixed Radix IFFT with OpenMP
///////////////////////////////////////////////////////////////////////////////
// invfftp: recursive mixed-radix inverse FFT driver, OpenMP variant.
// Same factor-peeling order as the serial invfft: 8, 4, 2, 5, 3, then
// invfftanyp for any remaining odd length.
void invfftp(const int n, const int s, const bool eo, complex_vector x, complex_vector y, const_complex_vector W) noexcept { const int N = n*s; if (N < 2) return; if (n%8 == 0) { if (n == 8) invend8p(s, eo, x, y); else { invcore8p(n, s, x, y, W); invfftp(n/8, 8*s, !eo, y, x, W); } } else if (n%4 == 0) { if (n == 4) invend4p(s, eo, x, y); else { invcore4p(n, s, x, y, W); invfftp(n/4, 4*s, !eo, y, x, W); } } else if (n%2 == 0) { if (n == 2) invend2p(s, eo, x, y); else { invcore2p(n, s, x, y, W); invfftp(n/2, 2*s, !eo, y, x, W); } } else if (n%5 == 0) { if (n == 5) invend5p(s, eo, x, y, W); else { invcore5p(n, s, x, y, W); invfftp(n/5, 5*s, !eo, y, x, W); } } else if (n%3 == 0) { if (n == 3) invend3p(s, eo, x, y, W); else { invcore3p(n, s, x, y, W); invfftp(n/3, 3*s, !eo, y, x, W); } } else invfftanyp(7, n, s, eo, x, y, W); }
} // closes a namespace whose opening brace lies before this chunk
/////////////////////////////////////////////////////////////////////////////
} // closes the outer namespace whose opening brace lies before this chunk
GB_binop__ge_uint64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__ge_uint64
// A.*B function (eWiseMult):       GB_AemultB__ge_uint64
// A*D function (colscale):         GB_AxD__ge_uint64
// D*A function (rowscale):         GB_DxB__ge_uint64
// C+=B function (dense accum):     GB_Cdense_accumB__ge_uint64
// C+=b function (dense accum):     GB_Cdense_accumb__ge_uint64
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__ge_uint64
// C=scalar+B                       GB_bind1st__ge_uint64
// C=scalar+B'                      GB_bind1st_tran__ge_uint64
// C=A+scalar                       GB_bind2nd__ge_uint64
// C=A'+scalar                      GB_bind2nd_tran__ge_uint64

// C type:   bool
// A type:   uint64_t
// B,b type: uint64_t

// BinaryOp: cij = (aij >= bij)

// These macros parameterize the shared template files included below; the
// templates expand into type- and operator-specific kernels.

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x >= y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_GE || GxB_NO_UINT64 || GxB_NO_GE_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// GE is not a monoid-style accumulator, so this kernel is compiled out.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__ge_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// GE cannot accumulate (C's bool type differs from A/B), so the template is
// compiled out; the stub still returns GrB_SUCCESS to satisfy the generated
// dispatch table.
GrB_Info GB_Cdense_accumB__ge_uint64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__ge_uint64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__ge_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__ge_uint64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Frees the ek_slice workspace allocated inside the template below.
#undef  GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}

GrB_Info GB_AaddB__ge_uint64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__ge_uint64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__ge_uint64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (GBB is true if Bb is NULL)
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = Bx [p] ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__ge_uint64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = Ax [p] ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = Ax [pA] ; \
    Cx [pC] = (x >= aij) ; \
}

GrB_Info GB_bind1st_tran__ge_uint64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template expansion
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = Ax [pA] ; \
    Cx [pC] = (aij >= y) ; \
}

GrB_Info GB_bind2nd_tran__ge_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
SharedComponents.h
/***************************************************************************** * * Copyright (c) 2003-2020 by The University of Queensland * http://www.uq.edu.au * * Primary Business: Queensland, Australia * Licensed under the Apache License, version 2.0 * http://www.apache.org/licenses/LICENSE-2.0 * * Development until 2012 by Earth Systems Science Computational Center (ESSCC) * Development 2012-2013 by School of Earth Sciences * Development from 2014-2017 by Centre for Geoscience Computing (GeoComp) * Development from 2019 by School of Earth and Environmental Sciences ** *****************************************************************************/ /****************************************************************************/ /* Paso: shared components */ /****************************************************************************/ /* Author: Lutz Gross, l.gross@uq.edu.au */ /****************************************************************************/ #ifndef __PASO_SHAREDCOMPONENTS_H__ #define __PASO_SHAREDCOMPONENTS_H__ #include "Paso.h" namespace paso { struct SharedComponents; typedef boost::shared_ptr<SharedComponents> SharedComponents_ptr; typedef boost::shared_ptr<const SharedComponents> const_SharedComponents_ptr; struct PASO_DLL_API SharedComponents { SharedComponents(dim_t localLength, const std::vector<int>& neighbours, const index_t* sharedArray, const std::vector<index_t>& offset, index_t m = 1, index_t b = 0) : local_length(localLength*m), neighbour(neighbours), offsetInShared(offset) { if (offset.empty()) { numSharedComponents = 0; } else { numSharedComponents = offset[neighbours.size()] * m; } shared = new index_t[numSharedComponents]; if (!neighbours.empty() && !offset.empty()) { if (m != 1) { for (int i = 0; i < offsetInShared.size(); i++) { offsetInShared[i] *= m; } } #pragma omp parallel for for (dim_t i = 0; i < offset[neighbours.size()]; i++) { const index_t itmp = m * sharedArray[i] + b; for (dim_t j = 0; j < m; ++j) shared[m*i+j] = 
itmp+j; } } else { offsetInShared[neighbours.size()] = 0; } } ~SharedComponents() { delete[] shared; } /// local array length shared dim_t local_length; /// list of the processors sharing values with this processor std::vector<int> neighbour; /// offsetInShared[i] points to the first input value in array shared /// for processor i. Has length numNeighbors+1 std::vector<index_t> offsetInShared; /// list of the (local) components which are shared with other processors. /// Has length numSharedComponents index_t* shared; /// = offsetInShared[numNeighbours] dim_t numSharedComponents; }; } // namespace paso #endif // __PASO_SHAREDCOMPONENTS_H__
GB_unop__identity_fp32_uint16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fp32_uint16)
// op(A') function:  GB (_unop_tran__identity_fp32_uint16)

// C type:   float
// A type:   uint16_t
// cast:     float cij = (float) aij
// unaryop:  cij = aij

// These macros parameterize the shared template files included below.

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    float z = (float) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = (float) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fp32_uint16)
(
    float *Cx,              // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fp32_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
image.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % % % MagickCore Image Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/delegate.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/magick-private.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/timer.h" #include "MagickCore/timer-private.h" #include "MagickCore/token.h" #include 
"MagickCore/token-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" #include "MagickCore/xwindow-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImage() returns a pointer to an image structure initialized to % default values. % % The format of the AcquireImage method is: % % Image *AcquireImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AcquireImage(const ImageInfo *image_info, ExceptionInfo *exception) { const char *option; Image *image; MagickStatusType flags; /* Allocate image structure. */ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); image=(Image *) AcquireCriticalMemory(sizeof(*image)); (void) memset(image,0,sizeof(*image)); /* Initialize Image structure. 
*/ (void) CopyMagickString(image->magick,"MIFF",MagickPathExtent); image->storage_class=DirectClass; image->depth=MAGICKCORE_QUANTUM_DEPTH; image->colorspace=sRGBColorspace; image->rendering_intent=PerceptualIntent; image->gamma=1.000f/2.200f; image->chromaticity.red_primary.x=0.6400f; image->chromaticity.red_primary.y=0.3300f; image->chromaticity.red_primary.z=0.0300f; image->chromaticity.green_primary.x=0.3000f; image->chromaticity.green_primary.y=0.6000f; image->chromaticity.green_primary.z=0.1000f; image->chromaticity.blue_primary.x=0.1500f; image->chromaticity.blue_primary.y=0.0600f; image->chromaticity.blue_primary.z=0.7900f; image->chromaticity.white_point.x=0.3127f; image->chromaticity.white_point.y=0.3290f; image->chromaticity.white_point.z=0.3583f; image->interlace=NoInterlace; image->ticks_per_second=UndefinedTicksPerSecond; image->compose=OverCompositeOp; (void) QueryColorCompliance(MatteColor,AllCompliance,&image->matte_color, exception); (void) QueryColorCompliance(BackgroundColor,AllCompliance, &image->background_color,exception); (void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color, exception); (void) QueryColorCompliance(TransparentColor,AllCompliance, &image->transparent_color,exception); GetTimerInfo(&image->timer); image->cache=AcquirePixelCache(0); image->channel_mask=DefaultChannels; image->channel_map=AcquirePixelChannelMap(); image->blob=CloneBlobInfo((BlobInfo *) NULL); image->timestamp=GetMagickTime(); image->debug=IsEventLogging(); image->reference_count=1; image->semaphore=AcquireSemaphoreInfo(); image->signature=MagickCoreSignature; if (image_info == (ImageInfo *) NULL) return(image); /* Transfer image info. */ SetBlobExempt(image,image_info->file != (FILE *) NULL ? 
MagickTrue : MagickFalse); (void) CopyMagickString(image->filename,image_info->filename, MagickPathExtent); (void) CopyMagickString(image->magick_filename,image_info->filename, MagickPathExtent); (void) CopyMagickString(image->magick,image_info->magick,MagickPathExtent); if (image_info->size != (char *) NULL) { (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info); image->columns=image->extract_info.width; image->rows=image->extract_info.height; image->offset=image->extract_info.x; image->extract_info.x=0; image->extract_info.y=0; } if (image_info->extract != (char *) NULL) { RectangleInfo geometry; (void) memset(&geometry,0,sizeof(geometry)); flags=ParseAbsoluteGeometry(image_info->extract,&geometry); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { image->extract_info=geometry; Swap(image->columns,image->extract_info.width); Swap(image->rows,image->extract_info.height); } } image->compression=image_info->compression; image->quality=image_info->quality; image->endian=image_info->endian; image->interlace=image_info->interlace; image->units=image_info->units; if (image_info->density != (char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(image_info->density,&geometry_info); if ((flags & RhoValue) != 0) image->resolution.x=geometry_info.rho; image->resolution.y=image->resolution.x; if ((flags & SigmaValue) != 0) image->resolution.y=geometry_info.sigma; } if (image_info->page != (char *) NULL) { char *geometry; image->page=image->extract_info; geometry=GetPageGeometry(image_info->page); (void) ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } if (image_info->depth != 0) image->depth=image_info->depth; image->dither=image_info->dither; image->matte_color=image_info->matte_color; image->background_color=image_info->background_color; image->border_color=image_info->border_color; image->transparent_color=image_info->transparent_color; image->ping=image_info->ping; 
image->progress_monitor=image_info->progress_monitor; image->client_data=image_info->client_data; if (image_info->cache != (void *) NULL) ClonePixelCacheMethods(image->cache,image_info->cache); /* Set all global options that map to per-image settings. */ (void) SyncImageSettings(image_info,image,exception); /* Global options that are only set for new images. */ option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if ((double) image->delay > floor(geometry_info.rho+0.5)) image->delay=(size_t) CastDoubleToLong(floor( geometry_info.rho+0.5)); } else if ((flags & LessValue) != 0) { if ((double) image->delay < floor(geometry_info.rho+0.5)) image->ticks_per_second=CastDoubleToLong(floor( geometry_info.sigma+0.5)); } else image->delay=(size_t) CastDoubleToLong(floor(geometry_info.rho+0.5)); if ((flags & SigmaValue) != 0) image->ticks_per_second=CastDoubleToLong(floor( geometry_info.sigma+0.5)); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions, MagickFalse,option); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImageInfo() allocates the ImageInfo structure. 
%
%  The format of the AcquireImageInfo method is:
%
%      ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  ImageInfo
    *image_info;

  /* AcquireCriticalMemory() aborts on allocation failure, so image_info is
     never NULL here */
  image_info=(ImageInfo *) AcquireCriticalMemory(sizeof(*image_info));
  GetImageInfo(image_info);
  return(image_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e N e x t I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireNextImage() initializes the next image in a sequence to
%  default values.  The next member of image points to the newly allocated
%  image.  If there is a memory shortage, next is assigned NULL.
%
%  The format of the AcquireNextImage method is:
%
%      void AcquireNextImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: Many of the image default values are set from this
%      structure.  For example, filename, compression, depth, background color,
%      and others.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image,
  ExceptionInfo *exception)
{
  /*
    Allocate image structure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info,exception);
  if (GetNextImageInList(image) == (Image *) NULL)
    return;
  /* seed the next frame's filename from the current frame; NOTE(review):
     when image_info is non-NULL this is immediately overwritten below */
  (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
    MagickPathExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(GetNextImageInList(image)->filename,
      image_info->filename,MagickPathExtent);
  /* the next frame shares this image's blob (reference counted) */
  DestroyBlob(GetNextImageInList(image));
  image->next->blob=ReferenceBlob(image->blob);
  image->next->endian=image->endian;
  image->next->scene=image->scene+1;
  image->next->previous=image;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A p p e n d I m a g e s                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AppendImages() takes all images from the current image pointer to the end
%  of the image list and appends them to each other top-to-bottom if the
%  stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting effects how the image is justified in the
%  final image.
%
%  The format of the AppendImages method is:
%
%      Image *AppendImages(const Image *images,const MagickBooleanType stack,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag  "Append/Image"

  CacheView
    *append_view;

  Image
    *append_image;

  ImageType
    image_type;

  MagickBooleanType
    homogeneous_colorspace,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  const Image
    *next;

  size_t
    depth,
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y,
    y_offset;

  /*
    Compute maximum area of appended area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  alpha_trait=images->alpha_trait;
  number_images=1;
  width=images->columns;
  height=images->rows;
  depth=images->depth;
  image_type=images->type;
  homogeneous_colorspace=MagickTrue;
  /*
    Scan the list once to size the canvas: when stacking, sum rows and track
    the widest column count; otherwise sum columns and track the tallest row
    count.  Also detect mixed types/colorspaces and any alpha channel.
  */
  next=GetNextImageInList(images);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->depth > depth)
      depth=next->depth;
    if (next->type != images->type)
      image_type=UndefinedType;
    if (next->colorspace != images->colorspace)
      homogeneous_colorspace=MagickFalse;
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images.
  */
  append_image=CloneImage(images,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (image_type != BilevelType)
    {
      if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse)
        {
          append_image=DestroyImage(append_image);
          return((Image *) NULL);
        }
      /* Mixed colorspaces: normalize the canvas to sRGB. */
      if (homogeneous_colorspace == MagickFalse)
        (void) SetImageColorspace(append_image,sRGBColorspace,exception);
    }
  append_image->depth=depth;
  append_image->alpha_trait=alpha_trait;
  append_image->page=images->page;
  (void) SetImageBackgroundColor(append_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  next=images;
  append_view=AcquireAuthenticCacheView(append_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    CacheView
      *image_view;

    MagickBooleanType
      proceed;

    /*
      Let the gravity setting justify this image within its strip.
    */
    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(next,next,next->rows,1)
#endif
    for (y=0; y < (ssize_t) next->rows; y++)
    {
      MagickBooleanType
        sync;

      PixelInfo
        pixel;

      const Quantum
        *magick_restrict p;

      Quantum
        *magick_restrict q;

      ssize_t
        x;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        next->columns,1,exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      GetPixelInfo(next,&pixel);
      /*
        Copy the row through PixelInfo so differing channel layouts between
        source and destination are reconciled per pixel.
      */
      for (x=0; x < (ssize_t) next->columns; x++)
      {
        GetPixelInfoPixel(next,p,&pixel);
        SetPixelViaPixelInfo(append_image,&pixel,q);
        p+=GetPixelChannels(next);
        q+=GetPixelChannels(append_image);
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    /* Advance the insertion offset for the next image in the sequence. */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) next->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) next->rows;
      }
    proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    next=GetNextImageInList(next);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C a t c h I m a g e E x c e p t i o n                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CatchImageException() returns if no exceptions are found in the image
%  sequence, otherwise it determines the most severe exception and reports
%  it as a warning or error depending on the severity.
%
%  The format of the CatchImageException method is:
%
%      ExceptionType CatchImageException(Image *image)
%
%  A description of each parameter follows:
%
%    o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *exception;

  ExceptionType
    severity;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Report pending exceptions, then return the most severe one observed. */
  exception=AcquireExceptionInfo();
  CatchException(exception);
  severity=exception->severity;
  exception=DestroyExceptionInfo(exception);
  return(severity);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C l i p I m a g e P a t h                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClipImagePath() sets the image clip mask based on any clipping path
%  information if it exists.
% % The format of the ClipImagePath method is: % % MagickBooleanType ClipImagePath(Image *image,const char *pathname, % const MagickBooleanType inside,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception) { return(ClipImagePath(image,"#1",MagickTrue,exception)); } MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname, const MagickBooleanType inside,ExceptionInfo *exception) { #define ClipImagePathTag "ClipPath/Image" char *property; const char *value; Image *clip_mask; ImageInfo *image_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pathname != NULL); property=AcquireString(pathname); (void) FormatLocaleString(property,MagickPathExtent,"8BIM:1999,2998:%s", pathname); value=GetImageProperty(image,property,exception); property=DestroyString(property); if (value == (const char *) NULL) { ThrowFileException(exception,OptionError,"NoClipPathDefined", image->filename); return(MagickFalse); } image_info=AcquireImageInfo(); (void) CopyMagickString(image_info->filename,image->filename, MagickPathExtent); (void) ConcatenateMagickString(image_info->filename,pathname, MagickPathExtent); clip_mask=BlobToImage(image_info,value,strlen(value),exception); image_info=DestroyImageInfo(image_info); if (clip_mask == (Image *) NULL) return(MagickFalse); if (clip_mask->storage_class == PseudoClass) { (void) SyncImage(clip_mask,exception); if 
(SetImageStorageClass(clip_mask,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (inside != MagickFalse) (void) NegateImage(clip_mask,MagickFalse,exception); (void) FormatLocaleString(clip_mask->magick_filename,MagickPathExtent, "8BIM:1999,2998:%s\nPS",pathname); (void) SetImageMask(image,WritePixelMask,clip_mask,exception); image->mask_trait=UpdatePixelTrait; clip_mask=DestroyImage(clip_mask); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImage() copies an image and returns the copy as a new image object. % % If the specified columns and rows is 0, an exact copy of the image is % returned, otherwise the pixel data is undefined and must be initialized % with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On % failure, a NULL image is returned and exception describes the reason for the % failure. % % The format of the CloneImage method is: % % Image *CloneImage(const Image *image,const size_t columns, % const size_t rows,const MagickBooleanType orphan, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the cloned image. % % o rows: the number of rows in the cloned image. % % o detach: With a value other than 0, the cloned image is detached from % its parent I/O stream. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CloneImage(const Image *image,const size_t columns, const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception) { double scale_x, scale_y; Image *clone_image; size_t length; /* Clone the image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((image->columns == 0) || (image->rows == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError, "NegativeOrZeroImageSize","`%s'",image->filename); return((Image *) NULL); } clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image)); (void) memset(clone_image,0,sizeof(*clone_image)); clone_image->signature=MagickCoreSignature; clone_image->storage_class=image->storage_class; clone_image->number_channels=image->number_channels; clone_image->number_meta_channels=image->number_meta_channels; clone_image->metacontent_extent=image->metacontent_extent; clone_image->colorspace=image->colorspace; clone_image->alpha_trait=image->alpha_trait; clone_image->channels=image->channels; clone_image->mask_trait=image->mask_trait; clone_image->columns=image->columns; clone_image->rows=image->rows; clone_image->dither=image->dither; clone_image->image_info=CloneImageInfo(image->image_info); (void) CloneImageProfiles(clone_image,image); (void) CloneImageProperties(clone_image,image); (void) CloneImageArtifacts(clone_image,image); GetTimerInfo(&clone_image->timer); if (image->ascii85 != (void *) NULL) Ascii85Initialize(clone_image); clone_image->extent=image->extent; clone_image->magick_columns=image->magick_columns; clone_image->magick_rows=image->magick_rows; clone_image->type=image->type; clone_image->channel_mask=image->channel_mask; clone_image->channel_map=ClonePixelChannelMap(image->channel_map); (void) CopyMagickString(clone_image->magick_filename,image->magick_filename, MagickPathExtent); (void) CopyMagickString(clone_image->magick,image->magick,MagickPathExtent); (void) CopyMagickString(clone_image->filename,image->filename, MagickPathExtent); 
clone_image->progress_monitor=image->progress_monitor; clone_image->client_data=image->client_data; clone_image->reference_count=1; clone_image->next=image->next; clone_image->previous=image->previous; clone_image->list=NewImageList(); if (detach == MagickFalse) clone_image->blob=ReferenceBlob(image->blob); else { clone_image->next=NewImageList(); clone_image->previous=NewImageList(); clone_image->blob=CloneBlobInfo((BlobInfo *) NULL); } clone_image->ping=image->ping; clone_image->debug=IsEventLogging(); clone_image->semaphore=AcquireSemaphoreInfo(); if (image->colormap != (PixelInfo *) NULL) { /* Allocate and copy the image colormap. */ clone_image->colors=image->colors; length=(size_t) image->colors; clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length+1, sizeof(*clone_image->colormap)); if (clone_image->colormap == (PixelInfo *) NULL) { clone_image=DestroyImage(clone_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memcpy(clone_image->colormap,image->colormap,length* sizeof(*clone_image->colormap)); } if ((columns == 0) || (rows == 0)) { if (image->montage != (char *) NULL) (void) CloneString(&clone_image->montage,image->montage); if (image->directory != (char *) NULL) (void) CloneString(&clone_image->directory,image->directory); clone_image->cache=ReferencePixelCache(image->cache); return(clone_image); } scale_x=1.0; scale_y=1.0; if (image->columns != 0) scale_x=(double) columns/(double) image->columns; if (image->rows != 0) scale_y=(double) rows/(double) image->rows; clone_image->page.width=(size_t) CastDoubleToLong(floor(scale_x* image->page.width+0.5)); clone_image->page.height=(size_t) CastDoubleToLong(floor(scale_y* image->page.height+0.5)); if (MagickAbsoluteValue(scale_x-scale_y) < 2.0) scale_x=scale_y=MagickMin(scale_x,scale_y); clone_image->page.x=CastDoubleToLong(ceil(scale_x*image->page.x-0.5)); clone_image->tile_offset.x=CastDoubleToLong(ceil(scale_x* image->tile_offset.x-0.5)); 
clone_image->page.y=CastDoubleToLong(ceil(scale_y*image->page.y-0.5));
  clone_image->tile_offset.y=CastDoubleToLong(ceil(scale_y*
    image->tile_offset.y-0.5));
  /* Resized clone gets its own cache; pixels are undefined until queued. */
  clone_image->cache=ClonePixelCache(image->cache);
  if (SetImageExtent(clone_image,columns,rows,exception) == MagickFalse)
    clone_image=DestroyImage(clone_image);
  return(clone_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C l o n e I m a g e I n f o                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageInfo() makes a copy of the given image info structure.  If
%  NULL is specified, a new image info structure is created initialized to
%  default values.
%
%  The format of the CloneImageInfo method is:
%
%      ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;

  /* Start from defaults; a NULL source means "fresh defaults only". */
  clone_info=AcquireImageInfo();
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  /* String members are deep-copied only when present. */
  if (image_info->size != (char *) NULL)
    (void) CloneString(&clone_info->size,image_info->size);
  if (image_info->extract != (char *) NULL)
    (void) CloneString(&clone_info->extract,image_info->extract);
  if (image_info->scenes != (char *) NULL)
    (void) CloneString(&clone_info->scenes,image_info->scenes);
  if (image_info->page != (char *) NULL)
    (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  if (image_info->sampling_factor != (char *) NULL)
    (void) CloneString(&clone_info->sampling_factor,
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,image_info->server_name);
  if (image_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,image_info->font);
  if (image_info->texture != (char *) NULL)
    (void) CloneString(&clone_info->texture,image_info->texture);
  if (image_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->matte_color=image_info->matte_color;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  clone_info->cache=image_info->cache;
  /* A non-NULL cache is shared by reference, not copied. */
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  clone_info->custom_stream=image_info->custom_stream;
  (void) CopyMagickString(clone_info->magick,image_info->magick,
    MagickPathExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,
    MagickPathExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MagickPathExtent);
  clone_info->channel=image_info->channel;
  (void) CloneImageOptions(clone_info,image_info);
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C o p y I m a g e P i x e l s                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CopyImagePixels() copies pixels from the source image as defined by the
%  geometry the destination image at the specified offset.
%
%  The format of the CopyImagePixels method is:
%
%      MagickBooleanType CopyImagePixels(Image *image,
%        const Image *source_image,const RectangleInfo *geometry,
%        const OffsetInfo *offset,ExceptionInfo *exception);
%
%  A description of each parameter follows:
%
%    o image: the destination image.
%
%    o source_image: the source image.
%
%    o geometry: define the dimensions of the source pixel rectangle.
%
%    o offset: define the offset in the destination image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
  const Image *source_image,const RectangleInfo *geometry,
  const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag  "Copy/Image"

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(source_image != (Image *) NULL);
  assert(geometry != (RectangleInfo *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  /* The destination rectangle must lie entirely inside the image. */
  if ((offset->x < 0) || (offset->y < 0) ||
      ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
      ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
    ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
      image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Copy image pixels.
*/ status=MagickTrue; progress=0; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,source_image,geometry->height,1) #endif for (y=0; y < (ssize_t) geometry->height; y++) { MagickBooleanType sync; const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y, geometry->width,1,exception); q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y, geometry->width,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) geometry->width; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image,channel); if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0) || (source_traits == UndefinedPixelTrait)) continue; SetPixelChannel(image,channel,p[i],q); } p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CopyImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImage() dereferences an image, deallocating memory associated with % the image if the reference count becomes zero. % % The format of the DestroyImage method is: % % Image *DestroyImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *DestroyImage(Image *image) { MagickBooleanType destroy; /* Dereference image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); destroy=MagickFalse; LockSemaphoreInfo(image->semaphore); image->reference_count--; if (image->reference_count == 0) destroy=MagickTrue; UnlockSemaphoreInfo(image->semaphore); if (destroy == MagickFalse) return((Image *) NULL); /* Destroy image. */ DestroyImagePixels(image); image->channel_map=DestroyPixelChannelMap(image->channel_map); if (image->montage != (char *) NULL) image->montage=DestroyString(image->montage); if (image->directory != (char *) NULL) image->directory=DestroyString(image->directory); if (image->colormap != (PixelInfo *) NULL) image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap); if (image->geometry != (char *) NULL) image->geometry=DestroyString(image->geometry); DestroyImageProfiles(image); DestroyImageProperties(image); DestroyImageArtifacts(image); if (image->ascii85 != (Ascii85Info *) NULL) image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85); if (image->image_info != (ImageInfo *) NULL) image->image_info=DestroyImageInfo(image->image_info); DestroyBlob(image); if (image->semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&image->semaphore); image->signature=(~MagickCoreSignature); image=(Image *) RelinquishMagickMemory(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y 
I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageInfo() deallocates memory associated with an ImageInfo % structure. % % The format of the DestroyImageInfo method is: % % ImageInfo *DestroyImageInfo(ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); if (image_info->size != (char *) NULL) image_info->size=DestroyString(image_info->size); if (image_info->extract != (char *) NULL) image_info->extract=DestroyString(image_info->extract); if (image_info->scenes != (char *) NULL) image_info->scenes=DestroyString(image_info->scenes); if (image_info->page != (char *) NULL) image_info->page=DestroyString(image_info->page); if (image_info->sampling_factor != (char *) NULL) image_info->sampling_factor=DestroyString( image_info->sampling_factor); if (image_info->server_name != (char *) NULL) image_info->server_name=DestroyString( image_info->server_name); if (image_info->font != (char *) NULL) image_info->font=DestroyString(image_info->font); if (image_info->texture != (char *) NULL) image_info->texture=DestroyString(image_info->texture); if (image_info->density != (char *) NULL) image_info->density=DestroyString(image_info->density); if (image_info->cache != (void *) NULL) image_info->cache=DestroyPixelCache(image_info->cache); if (image_info->profile != (StringInfo *) NULL) image_info->profile=(void *) DestroyStringInfo((StringInfo *) image_info->profile); DestroyImageOptions(image_info); image_info->signature=(~MagickCoreSignature); image_info=(ImageInfo *) RelinquishMagickMemory(image_info); return(image_info); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i s a s s o c i a t e I m a g e S t r e a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DisassociateImageStream() disassociates the image stream. It checks if the % blob of the specified image is referenced by other images. If the reference % count is higher then 1 a new blob is assigned to the specified image. % % The format of the DisassociateImageStream method is: % % void DisassociateImageStream(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DisassociateImageStream(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); DisassociateBlob(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageInfo() initializes image_info to default values. % % The format of the GetImageInfo method is: % % void GetImageInfo(ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport void GetImageInfo(ImageInfo *image_info) { char *synchronize; ExceptionInfo *exception; /* File and image dimension members. 
*/ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image_info != (ImageInfo *) NULL); (void) memset(image_info,0,sizeof(*image_info)); image_info->adjoin=MagickTrue; image_info->interlace=NoInterlace; image_info->channel=DefaultChannels; image_info->quality=UndefinedCompressionQuality; image_info->antialias=MagickTrue; image_info->dither=MagickTrue; synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (synchronize != (const char *) NULL) { image_info->synchronize=IsStringTrue(synchronize); synchronize=DestroyString(synchronize); } exception=AcquireExceptionInfo(); (void) QueryColorCompliance(BackgroundColor,AllCompliance, &image_info->background_color,exception); (void) QueryColorCompliance(BorderColor,AllCompliance, &image_info->border_color,exception); (void) QueryColorCompliance(MatteColor,AllCompliance,&image_info->matte_color, exception); (void) QueryColorCompliance(TransparentColor,AllCompliance, &image_info->transparent_color,exception); exception=DestroyExceptionInfo(exception); image_info->debug=IsEventLogging(); image_info->signature=MagickCoreSignature; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e I n f o F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageInfoFile() returns the image info file member. % % The format of the GetImageInfoFile method is: % % FILE *GetImageInfoFile(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info) { return(image_info->file); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMask() returns the mask associated with the image. 
%
%  The format of the GetImageMask method is:
%
%      Image *GetImageMask(const Image *image,const PixelMask type,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the mask type, ReadPixelMask or WritePixelMask.
%
*/
MagickExport Image *GetImageMask(const Image *image,const PixelMask type,
  ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;

  Image
    *mask_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Get image mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* If the requested mask channel is not active, there is no mask. */
  switch (type)
  {
    case ReadPixelMask:
    {
      if ((image->channels & ReadMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
    case WritePixelMask:
    {
      if ((image->channels & WriteMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
    default:
    {
      if ((image->channels & CompositeMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
  }
  /* Extract the mask channel into a new grayscale image. */
  mask_image=AcquireImage((ImageInfo *) NULL,exception);
  status=SetImageExtent(mask_image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(mask_image));
  status=MagickTrue;
  mask_image->alpha_trait=UndefinedPixelTrait;
  (void) SetImageColorspace(mask_image,GRAYColorspace,exception);
  image_view=AcquireVirtualCacheView(image,exception);
  mask_view=AcquireAuthenticCacheView(mask_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      switch (type)
      {
        case ReadPixelMask:
        {
          SetPixelGray(mask_image,GetPixelReadMask(image,p),q);
          break;
        }
        case WritePixelMask:
        {
          SetPixelGray(mask_image,GetPixelWriteMask(image,p),q);
          break;
        }
        default:
        {
          SetPixelGray(mask_image,GetPixelCompositeMask(image,p),q);
          break;
        }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mask_image);
    }
    if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    mask_image=DestroyImage(mask_image);
  return(mask_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   + G e t I m a g e R e f e r e n c e C o u n t                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageReferenceCount() returns the image reference count.
%
%  The format of the GetReferenceCount method is:
%
%      ssize_t GetImageReferenceCount(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    reference_count;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Read the count under the image semaphore for a consistent snapshot. */
  LockSemaphoreInfo(image->semaphore);
  reference_count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(reference_count);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e V i r t u a l P i x e l M e t h o d                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
%  image.  A virtual pixel is any pixel access that is outside the
%  boundaries of the image cache.
%
%  The format of the GetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    The virtual pixel method is stored with the pixel cache, not on the
    Image structure itself; delegate the lookup to the cache.
  */
  return(GetPixelCacheVirtualMethod(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e r p r e t I m a g e F i l e n a m e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpretImageFilename() interprets embedded characters in an image filename.
%  The filename length is returned.
%
%  The format of the InterpretImageFilename method is:
%
%      size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
%        const char *format,int value,char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o format: A filename describing the format to use to write the numeric
%      argument. Only the first numeric format identifier is replaced.
%
%    o value: Numeric value to substitute into format filename.
%
%    o filename: return the formatted filename in this character buffer.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport size_t InterpretImageFilename(const ImageInfo *image_info, Image *image,const char *format,int value,char *filename, ExceptionInfo *exception) { char *q; const char *p; int c; MagickBooleanType canonical; ssize_t field_width, offset; canonical=MagickFalse; offset=0; (void) CopyMagickString(filename,format,MagickPathExtent); if (IsStringTrue(GetImageOption(image_info,"filename:literal")) != MagickFalse) return(strlen(filename)); for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%')) { q=(char *) p+1; if (*q == '%') { p=q+1; continue; } field_width=0; if (*q == '0') field_width=(ssize_t) strtol(q,&q,10); switch (*q) { case 'd': case 'o': case 'x': { q++; c=(*q); *q='\0'; (void) FormatLocaleString(filename+(p-format-offset),(size_t) (MagickPathExtent-(p-format-offset)),p,value); offset+=(4-field_width); *q=c; (void) ConcatenateMagickString(filename,q,MagickPathExtent); canonical=MagickTrue; if (*(q-1) != '%') break; p++; break; } case '[': { char pattern[MagickPathExtent]; const char *option; char *r; ssize_t i; ssize_t depth; /* Image option. 
*/ if (strchr(p,']') == (char *) NULL) break; depth=1; r=q+1; for (i=0; (i < (MagickPathExtent-1L)) && (*r != '\0'); i++) { if (*r == '[') depth++; if (*r == ']') depth--; if (depth <= 0) break; pattern[i]=(*r++); } pattern[i]='\0'; if (LocaleNCompare(pattern,"filename:",9) != 0) break; option=(const char *) NULL; if (image != (Image *) NULL) option=GetImageProperty(image,pattern,exception); if ((option == (const char *) NULL) && (image != (Image *) NULL)) option=GetImageArtifact(image,pattern); if ((option == (const char *) NULL) && (image_info != (ImageInfo *) NULL)) option=GetImageOption(image_info,pattern); if (option == (const char *) NULL) break; q--; c=(*q); *q='\0'; (void) CopyMagickString(filename+(p-format-offset),option,(size_t) (MagickPathExtent-(p-format-offset))); offset+=strlen(pattern)-strlen(option)+3; *q=c; (void) ConcatenateMagickString(filename,r+1,MagickPathExtent); canonical=MagickTrue; if (*(q-1) != '%') break; p++; break; } default: break; } } if (canonical == MagickFalse) (void) CopyMagickString(filename,format,MagickPathExtent); else for (q=filename; *q != '\0'; q++) if ((*q == '%') && (*(q+1) == '%')) (void) CopyMagickString(q,q+1,(size_t) (MagickPathExtent-(q-filename))); return(strlen(filename)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s H i g h D y n a m i c R a n g e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsHighDynamicRangeImage() returns MagickTrue if any pixel component is % non-integer or exceeds the bounds of the quantum depth (e.g. for Q16 % 0..65535. % % The format of the IsHighDynamicRangeImage method is: % % MagickBooleanType IsHighDynamicRangeImage(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /*
    Non-HDRI builds store quantized integer samples, so no pixel can be
    out of range or non-integral by construction.
  */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    status stays MagickTrue only while every channel of every pixel is an
    integral value inside [0,QuantumRange]; it is flipped to MagickFalse as
    soon as an HDR sample is found (or a cache row cannot be read).
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelTrait
          traits;

        traits=GetPixelChannelTraits(image,(PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        pixel=(double) p[i];
        /*
          An HDR sample is negative, above QuantumRange, or has a
          fractional part (round-trip through QuantumAny changes it).
        */
        if ((pixel < 0.0) || (pixel > QuantumRange) ||
            (pixel != (double) ((QuantumAny) pixel)))
          break;
      }
      p+=GetPixelChannels(image);
      /*
        The channel loop exited early, i.e. an HDR sample was found.
      */
      if (i < (ssize_t) GetPixelChannels(image))
        status=MagickFalse;
    }
    /*
      NOTE(review): the x loop above contains no break, so x always equals
      image->columns here and this condition is never true — looks like
      dead code; confirm before removing.
    */
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /*
    Invert: status==MagickFalse means an out-of-range/non-integral sample
    was seen, which is exactly the "is HDR" answer.
  */
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e O b j e c t                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageObject() returns MagickTrue if the image sequence contains a valid
%  set of image objects.
%
%  The format of the IsImageObject method is:
%
%      MagickBooleanType IsImageObject(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  const Image
    *next;

  /*
    Walk the image list; every node must carry a valid core signature for
    the sequence to qualify as a set of image objects.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  next=image;
  while (next != (Image *) NULL)
  {
    if (next->signature != MagickCoreSignature)
      return(MagickFalse);
    next=GetNextImageInList(next);
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s T a i n t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsTaintImage() returns MagickTrue if any pixel in the image has been
%  altered since it was first constituted.
%
%  The format of the IsTaintImage method is:
%
%      MagickBooleanType IsTaintImage(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    magick[MagickPathExtent],
    filename[MagickPathExtent];

  const Image
    *p;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Snapshot the first image's format and filename; any frame whose taint
    flag is set, or whose format/filename differs from the first frame's,
    marks the sequence as altered.
  */
  (void) CopyMagickString(magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    if (p->taint != MagickFalse)
      return(MagickTrue);
    if (LocaleCompare(p->magick,magick) != 0)
      return(MagickTrue);
    if (LocaleCompare(p->filename,filename) != 0)
      return(MagickTrue);
  }
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M o d i f y I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModifyImage() ensures that there is only a single reference to the image
%  to be modified, updating the provided image pointer to point to a clone of
%  the original image if necessary.
%
%  The format of the ModifyImage method is:
%
%      MagickBooleanType ModifyImage(Image **image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);  /* sole owner already; no clone needed */
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    {
      /*
        Clone failed (exception already recorded): leave *image and its
        reference count untouched rather than handing the caller a NULL
        image with a decremented refcount.
      */
      return(MagickFalse);
    }
  /*
    Drop our share of the original under its semaphore, then point the
    caller at the private clone.
  */
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w M a g i c k I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewMagickImage() creates a blank image canvas of the specified size and
%  background color.
%
%  The format of the NewMagickImage method is:
%
%      Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
%        const size_t height,const PixelInfo *background,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the image width.
%
%    o height: the image height.
%
%    o background: the image color.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *NewMagickImage(const ImageInfo *image_info, const size_t width,const size_t height,const PixelInfo *background, ExceptionInfo *exception) { CacheView *image_view; Image *image; MagickBooleanType status; ssize_t y; assert(image_info != (const ImageInfo *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image_info->signature == MagickCoreSignature); assert(background != (const PixelInfo *) NULL); image=AcquireImage(image_info,exception); image->columns=width; image->rows=height; image->colorspace=background->colorspace; image->alpha_trait=background->alpha_trait; image->fuzz=background->fuzz; image->depth=background->depth; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(image,background,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e f e r e n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferenceImage() increments the reference count associated with an image % returning a pointer to the image. 
%
%  The format of the ReferenceImage method is:
%
%      Image *ReferenceImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Bump the reference count under the image semaphore so concurrent
    Reference/Destroy calls stay consistent.
  */
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t I m a g e P a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImagePage() resets the image page canvas and position.
%
%  The format of the ResetImagePage method is:
%
%      MagickBooleanType ResetImagePage(Image *image,const char *page)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o page: the relative page specification.
% */
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    flags records which components (width/height/x/y and modifiers) the
    page geometry string actually specified.
  */
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      /*
        A lone width implies a square canvas.
      */
      if ((flags & HeightValue) == 0)
        geometry.height=geometry.width;
      image->page.width=geometry.width;
      image->page.height=geometry.height;
    }
  if ((flags & AspectValue) != 0)
    {
      /*
        AspectValue ('!' modifier): treat the offsets as relative
        adjustments to the current page position.
      */
      if ((flags & XValue) != 0)
        image->page.x+=geometry.x;
      if ((flags & YValue) != 0)
        image->page.y+=geometry.y;
    }
  else
    {
      /*
        Absolute offsets; grow an unset canvas dimension so a positive
        offset still fits the image.
      */
      if ((flags & XValue) != 0)
        {
          image->page.x=geometry.x;
          if ((image->page.width == 0) && (geometry.x > 0))
            image->page.width=image->columns+geometry.x;
        }
      if ((flags & YValue) != 0)
        {
          image->page.y=geometry.y;
          if ((image->page.height == 0) && (geometry.y > 0))
            image->page.height=image->rows+geometry.y;
        }
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t I m a g e P i x e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImagePixels() resets the image pixels, that is, all the pixel
%  components are zeroed.
%
%  The format of the ResetImagePixels method is:
%
%      MagickBooleanType ResetImagePixels(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType ResetImagePixels(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; size_t length; ssize_t y; void *pixels; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); pixels=AcquirePixelCachePixels(image,&length,exception); if (pixels != (void *) NULL) { /* Reset in-core image pixels. */ (void) memset(pixels,0,length); return(MagickTrue); } /* Reset image pixels. */ status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { (void) memset(q,0,GetPixelChannels(image)*sizeof(Quantum)); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageAlpha() sets the alpha levels of the image. % % The format of the SetImageAlpha method is: % % MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o alpha: the level of transparency: 0 is fully transparent and QuantumRange % is fully opaque. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); image->alpha_trait=BlendPixelTrait; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,q) > (QuantumRange/2)) SetPixelAlpha(image,alpha,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e B a c k g r o u n d C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageBackgroundColor() initializes the image pixels to the image % background color. The background color is defined by the background_color % member of the image structure. % % The format of the SetImage method is: % % MagickBooleanType SetImageBackgroundColor(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType SetImageBackgroundColor(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; PixelInfo background; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if ((image->background_color.alpha_trait != UndefinedPixelTrait) && (image->alpha_trait == UndefinedPixelTrait)) (void) SetImageAlphaChannel(image,OnAlphaChannel,exception); ConformPixelInfo(image,&image->background_color,&background,exception); /* Set image background color. */ status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(image,&background,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C h a n n e l M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageChannelMask() sets the image channel mask from the specified channel % mask. % % The format of the SetImageChannelMask method is: % % ChannelType SetImageChannelMask(Image *image, % const ChannelType channel_mask) % % A description of each parameter follows: % % o image: the image. % % o channel_mask: the channel mask. 
% */ MagickExport ChannelType SetImageChannelMask(Image *image, const ChannelType channel_mask) { return(SetPixelChannelMask(image,channel_mask)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColor() set the entire image canvas to the specified color. % % The format of the SetImageColor method is: % % MagickBooleanType SetImageColor(Image *image,const PixelInfo *color, % ExeptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o background: the image color. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageColor(Image *image, const PixelInfo *color,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); assert(color != (const PixelInfo *) NULL); image->colorspace=color->colorspace; image->alpha_trait=color->alpha_trait; image->fuzz=color->fuzz; image->depth=color->depth; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(image,color,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } 
image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageStorageClass() sets the image class: DirectClass for true color % images or PseudoClass for colormapped images. % % The format of the SetImageStorageClass method is: % % MagickBooleanType SetImageStorageClass(Image *image, % const ClassType storage_class,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o storage_class: The image class. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageStorageClass(Image *image, const ClassType storage_class,ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image->storage_class=storage_class; return(SyncImagePixelCache(image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageExtent() sets the image size (i.e. columns & rows). % % The format of the SetImageExtent method is: % % MagickBooleanType SetImageExtent(Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: The image width in pixels. % % o rows: The image height in pixels. % % o exception: return any errors or warnings in this structure. 
% */
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  if ((columns == 0) || (rows == 0))
    ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename);
  image->columns=columns;
  image->rows=rows;
  /*
    Clamp the depth to a supported range: a zero depth is coerced to 8
    bits and an over-wide depth to the widest supported sample
    (8*sizeof(MagickSizeType) bits); both cases record an exception but
    processing continues with the corrected depth.
  */
  if (image->depth == 0)
    {
      image->depth=8;
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageDepthNotSupported","`%s'",image->filename);
    }
  if (image->depth > (8*sizeof(MagickSizeType)))
    {
      image->depth=8*sizeof(MagickSizeType);
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageDepthNotSupported","`%s'",image->filename);
    }
  /*
    Resize/validate the pixel cache for the new extent.
  */
  return(SyncImagePixelCache(image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  +  S e t I m a g e I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfo() initializes the 'magick' field of the ImageInfo structure.
%  It is set to a type of image format based on the prefix or suffix of the
%  filename.  For example, 'ps:image' returns PS indicating a Postscript image.
%  JPEG is returned for this filename: 'image.jpg'.  The filename prefix has
%  precedence over the suffix.  Use an optional index enclosed in brackets
%  after a file name to specify a desired scene of a multi-resolution image
%  format like Photo CD (e.g. img0001.pcd[4]).  A True (non-zero) return value
%  indicates success.
%
%  The format of the SetImageInfo method is:
%
%      MagickBooleanType SetImageInfo(ImageInfo *image_info,
%        const unsigned int frames,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o frames: the number of images you intend to write.
%
%    o exception: return any errors or warnings in this structure.
% */ static const MagickInfo *SetImageInfoFromExtension(ImageInfo *image_info, const char *component,char *magic,ExceptionInfo *exception) { const MagickInfo *magick_info; MagickFormatType format_type; ssize_t i; static const char *format_type_formats[] = { "AUTOTRACE", "BROWSE", "DCRAW", "EDIT", "LAUNCH", "MPEG:DECODE", "MPEG:ENCODE", "PRINT", "PS:ALPHA", "PS:CMYK", "PS:COLOR", "PS:GRAY", "PS:MONO", "SCAN", "SHOW", "WIN", (char *) NULL }; /* User specified image format. */ (void) CopyMagickString(magic,component,MagickPathExtent); LocaleUpper(magic); /* Look for explicit image formats. */ format_type=UndefinedFormatType; magick_info=GetMagickInfo(magic,exception); if ((magick_info != (const MagickInfo *) NULL) && (magick_info->format_type != UndefinedFormatType)) format_type=magick_info->format_type; i=0; while ((format_type == UndefinedFormatType) && (format_type_formats[i] != (char *) NULL)) { if ((*magic == *format_type_formats[i]) && (LocaleCompare(magic,format_type_formats[i]) == 0)) format_type=ExplicitFormatType; i++; } if (format_type == UndefinedFormatType) (void) CopyMagickString(image_info->magick,magic,MagickPathExtent); else if (format_type == ExplicitFormatType) { image_info->affirm=MagickTrue; (void) CopyMagickString(image_info->magick,magic,MagickPathExtent); } if (LocaleCompare(magic,"RGB") == 0) image_info->affirm=MagickFalse; /* maybe SGI disguised as RGB */ return(magick_info); } MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info, const unsigned int frames,ExceptionInfo *exception) { char component[MagickPathExtent], magic[MagickPathExtent], path[MagickPathExtent], *q; const MagicInfo *magic_info; const MagickInfo *magick_info; ExceptionInfo *sans_exception; Image *image; MagickBooleanType status; const char *p; ssize_t count; /* Look for 'image.format' in filename. 
*/ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); *component='\0'; GetPathComponent(image_info->filename,SubimagePath,component); if (*component != '\0') { /* Look for scene specification (e.g. img0001.pcd[4]). */ if (IsSceneGeometry(component,MagickFalse) == MagickFalse) { if (IsGeometry(component) != MagickFalse) (void) CloneString(&image_info->extract,component); } else { size_t first, last; (void) CloneString(&image_info->scenes,component); image_info->scene=StringToUnsignedLong(image_info->scenes); image_info->number_scenes=image_info->scene; p=image_info->scenes; for (q=(char *) image_info->scenes; *q != '\0'; p++) { while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) p++; first=(size_t) strtol(p,&q,10); last=first; while (isspace((int) ((unsigned char) *q)) != 0) q++; if (*q == '-') last=(size_t) strtol(q+1,&q,10); if (first > last) Swap(first,last); if (first < image_info->scene) image_info->scene=first; if (last > image_info->number_scenes) image_info->number_scenes=last; p=q; } image_info->number_scenes-=image_info->scene-1; } } *component='\0'; if (*image_info->magick == '\0') GetPathComponent(image_info->filename,ExtensionPath,component); if (*component != '\0') { /* Base path sans any compression extension. */ GetPathComponent(image_info->filename,BasePathSansCompressExtension,path); GetPathComponent(path,ExtensionPath,component); } image_info->affirm=MagickFalse; sans_exception=AcquireExceptionInfo(); if ((*component != '\0') && (IsGlob(component) == MagickFalse)) magick_info=SetImageInfoFromExtension(image_info,component,magic, sans_exception); /* Look for explicit 'format:image' in filename. 
*/ *magic='\0'; GetPathComponent(image_info->filename,MagickPath,magic); if (*magic == '\0') { (void) CopyMagickString(magic,image_info->magick,MagickPathExtent); magick_info=GetMagickInfo(magic,sans_exception); if (frames == 0) GetPathComponent(image_info->filename,CanonicalPath,component); else GetPathComponent(image_info->filename,SubcanonicalPath,component); (void) CopyMagickString(image_info->filename,component,MagickPathExtent); } else { const DelegateInfo *delegate_info; /* User specified image format. */ LocaleUpper(magic); magick_info=GetMagickInfo(magic,sans_exception); delegate_info=(const DelegateInfo *) NULL; if (magick_info == (const MagickInfo *) NULL) { delegate_info=GetDelegateInfo(magic,"*",sans_exception); if (delegate_info == (const DelegateInfo *) NULL) delegate_info=GetDelegateInfo("*",magic,sans_exception); if ((delegate_info == (const DelegateInfo *) NULL) && ((*component != '\0') && (IsGlob(component) == MagickFalse))) { /* Retry in case GetMagickInfo loaded a custom module. */ magick_info=SetImageInfoFromExtension(image_info,component,magic, sans_exception); } } if (((magick_info != (const MagickInfo *) NULL) || (delegate_info != (const DelegateInfo *) NULL)) && (IsMagickConflict(magic) == MagickFalse)) { image_info->affirm=MagickTrue; (void) CopyMagickString(image_info->magick,magic,MagickPathExtent); GetPathComponent(image_info->filename,CanonicalPath,component); (void) CopyMagickString(image_info->filename,component, MagickPathExtent); } } sans_exception=DestroyExceptionInfo(sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; if ((image_info->adjoin != MagickFalse) && (frames > 1)) { /* Test for multiple image support (e.g. image%02d.png). 
*/ (void) InterpretImageFilename(image_info,(Image *) NULL, image_info->filename,(int) image_info->scene,component,exception); if ((LocaleCompare(component,image_info->filename) != 0) && (strchr(component,'%') == (char *) NULL)) image_info->adjoin=MagickFalse; } if ((image_info->adjoin != MagickFalse) && (frames > 0)) { /* Some image formats do not support multiple frames per file. */ magick_info=GetMagickInfo(magic,exception); if (magick_info != (const MagickInfo *) NULL) if (GetMagickAdjoin(magick_info) == MagickFalse) image_info->adjoin=MagickFalse; } if (image_info->affirm != MagickFalse) return(MagickTrue); if (frames == 0) { unsigned char *magick; size_t magick_size; /* Determine the image format from the first few bytes of the file. */ magick_size=GetMagicPatternExtent(exception); if (magick_size == 0) return(MagickFalse); image=AcquireImage(image_info,exception); (void) CopyMagickString(image->filename,image_info->filename, MagickPathExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } if ((IsBlobSeekable(image) == MagickFalse) || (IsBlobExempt(image) != MagickFalse)) { /* Copy image to seekable temporary file. 
*/ *component='\0'; status=ImageToFile(image,component,exception); (void) CloseBlob(image); if (status == MagickFalse) { (void) RelinquishUniqueFileResource(component); image=DestroyImage(image); return(MagickFalse); } SetImageInfoFile(image_info,(FILE *) NULL); (void) CopyMagickString(image->filename,component,MagickPathExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { (void) RelinquishUniqueFileResource(component); image=DestroyImage(image); return(MagickFalse); } (void) CopyMagickString(image_info->filename,component, MagickPathExtent); image_info->temporary=MagickTrue; } magick=(unsigned char *) AcquireQuantumMemory(1,magick_size); if (magick == (unsigned char *) NULL) { (void) CloseBlob(image); image=DestroyImage(image); return(MagickFalse); } (void) memset(magick,0,magick_size); count=ReadBlob(image,magick_size,magick); (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR); (void) CloseBlob(image); image=DestroyImage(image); /* Check magic cache. 
*/ sans_exception=AcquireExceptionInfo(); magic_info=GetMagicInfo(magick,(size_t) count,sans_exception); magick=(unsigned char *) RelinquishMagickMemory(magick); if ((magic_info != (const MagicInfo *) NULL) && (GetMagicName(magic_info) != (char *) NULL)) { /* Try to use magick_info that was determined earlier by the extension */ if ((magick_info != (const MagickInfo *) NULL) && (GetMagickUseExtension(magick_info) != MagickFalse) && (LocaleCompare(magick_info->magick_module,GetMagicName( magic_info)) == 0)) (void) CopyMagickString(image_info->magick,magick_info->name, MagickPathExtent); else { (void) CopyMagickString(image_info->magick,GetMagicName( magic_info),MagickPathExtent); magick_info=GetMagickInfo(image_info->magick,sans_exception); } if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; sans_exception=DestroyExceptionInfo(sans_exception); return(MagickTrue); } magick_info=GetMagickInfo(image_info->magick,sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; sans_exception=DestroyExceptionInfo(sans_exception); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o B l o b % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoBlob() sets the image info blob member. % % The format of the SetImageInfoBlob method is: % % void SetImageInfoBlob(ImageInfo *image_info,const void *blob, % const size_t length) % % A description of each parameter follows: % % o image_info: the image info. % % o blob: the blob. % % o length: the blob length. 
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /*
    Stores a non-owning reference to the caller's buffer; no copy is made.
    NOTE(review): the blob must outlive the image_info -- confirm with callers.
  */
  image_info->blob=(void *) blob;
  image_info->length=length;
}

/*
  SetImageInfoCustomStream() sets the image info custom stream handlers.

  The format of the SetImageInfoCustomStream method is:

      void SetImageInfoCustomStream(ImageInfo *image_info,
        CustomStreamInfo *custom_stream)

  A description of each parameter follows:

    o image_info: the image info.

    o custom_stream: your custom stream methods.
*/
MagickExport void SetImageInfoCustomStream(ImageInfo *image_info,
  CustomStreamInfo *custom_stream)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* Non-owning reference: the caller retains ownership of custom_stream. */
  image_info->custom_stream=(CustomStreamInfo *) custom_stream;
}

/*
  SetImageInfoFile() sets the image info file member.

  The format of the SetImageInfoFile method is:

      void SetImageInfoFile(ImageInfo *image_info,FILE *file)

  A description of each parameter follows:

    o image_info: the image info.

    o file: the file.
% */ MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->file=file; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageMask() associates a mask with the image. The mask must be the same % dimensions as the image. % % The format of the SetImageMask method is: % % MagickBooleanType SetImageMask(Image *image,const PixelMask type, % const Image *mask,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the mask type, ReadPixelMask or WritePixelMask. % % o mask: the image mask. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageMask(Image *image,const PixelMask type, const Image *mask,ExceptionInfo *exception) { CacheView *mask_view, *image_view; MagickBooleanType status; ssize_t y; /* Set image mask. 
*/ assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (mask == (const Image *) NULL) { switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels & ~ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels & ~WriteMaskChannel); } default: { image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel); break; } } return(SyncImagePixelCache(image,exception)); } switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels | ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels | WriteMaskChannel); break; } default: { image->channels=(ChannelType) (image->channels | CompositeMaskChannel); break; } } if (SyncImagePixelCache(image,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; image->mask_trait=UpdatePixelTrait; mask_view=AcquireVirtualCacheView(mask,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(mask,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception); q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType intensity; intensity=0.0; if ((x < (ssize_t) mask->columns) && (y < (ssize_t) mask->rows)) intensity=GetPixelIntensity(mask,p); switch (type) { case ReadPixelMask: { SetPixelReadMask(image,ClampToQuantum(intensity),q); break; } case WritePixelMask: { 
SetPixelWriteMask(image,ClampToQuantum(intensity),q); break; } default: { SetPixelCompositeMask(image,ClampToQuantum(intensity),q); break; } } p+=GetPixelChannels(mask); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image->mask_trait=UndefinedPixelTrait; mask_view=DestroyCacheView(mask_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e R e g i o n M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageRegionMask() associates a mask with the image as defined by the % specified region. % % The format of the SetImageRegionMask method is: % % MagickBooleanType SetImageRegionMask(Image *image,const PixelMask type, % const RectangleInfo *region,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the mask type, ReadPixelMask or WritePixelMask. % % o geometry: the mask region. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageRegionMask(Image *image, const PixelMask type,const RectangleInfo *region,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; /* Set image mask as defined by the region. 
*/
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (region == (const RectangleInfo *) NULL)
    {
      /*
        A NULL region detaches only the channel of the requested type.
      */
      switch (type)
      {
        case ReadPixelMask:
        {
          image->channels=(ChannelType) (image->channels & ~ReadMaskChannel);
          break;
        }
        case WritePixelMask:
        {
          image->channels=(ChannelType) (image->channels & ~WriteMaskChannel);
          break;
        }
        default:
        {
          image->channels=(ChannelType) (image->channels &
            ~CompositeMaskChannel);
          break;
        }
      }
      return(SyncImagePixelCache(image,exception));
    }
  switch (type)
  {
    case ReadPixelMask:
    {
      image->channels=(ChannelType) (image->channels | ReadMaskChannel);
      break;
    }
    case WritePixelMask:
    {
      image->channels=(ChannelType) (image->channels | WriteMaskChannel);
      break;
    }
    default:
    {
      image->channels=(ChannelType) (image->channels | CompositeMaskChannel);
      break;
    }
  }
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  image->mask_trait=UpdatePixelTrait;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      Quantum
        pixel;

      /* Inside the region the mask value is 0; outside it is QuantumRange. */
      pixel=QuantumRange;
      if (((x >= region->x) && (x < (region->x+(ssize_t) region->width))) &&
          ((y >= region->y) && (y < (region->y+(ssize_t) region->height))))
        pixel=(Quantum) 0;
      switch (type)
      {
        case ReadPixelMask:
        {
          SetPixelReadMask(image,pixel,q);
          break;
        }
        case WritePixelMask:
        {
          SetPixelWriteMask(image,pixel,q);
          break;
        }
        default:
        {
          SetPixelCompositeMask(image,pixel,q);
          break;
        }
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image->mask_trait=UndefinedPixelTrait;
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
  SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
  image and returns the previous setting.  A virtual pixel is any pixel access
  that is outside the boundaries of the image cache.

  The format of the SetImageVirtualPixelMethod() method is:

      VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
        const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.

    o virtual_pixel_method: choose the type of virtual pixel.

    o exception: return any errors or warnings in this structure.
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(SetPixelCacheVirtualMethod(image,virtual_pixel_method,exception));
}

/*
  SmushImages() takes all images from the current image pointer to the end
  of the image list and smushes them to each other top-to-bottom if the
  stack parameter is true, otherwise left-to-right.

  The current gravity setting now affects how the image is justified in the
  final image.
  The format of the SmushImages method is:

      Image *SmushImages(const Image *images,const MagickBooleanType stack,
        ExceptionInfo *exception)

  A description of each parameter follows:

    o images: the image sequence.

    o stack: A value other than 0 stacks the images top-to-bottom.

    o offset: minimum distance in pixels between images.

    o exception: return any errors or warnings in this structure.
*/

/*
  SmushXGap() measures how far two horizontally adjacent images may overlap
  by scanning transparent columns at the right edge of the previous image and
  the left edge of this one; returns the permissible gap minus offset.
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  const Quantum
    *p;

  ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /* Scan right-to-left for the first non-transparent pixel on this row. */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(left_image,p) != TransparentAlpha) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    /* Scan left-to-right on the right image; shrink gap to the tightest fit. */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(right_image,p) != TransparentAlpha) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}

/*
  SmushYGap() is the vertical analogue of SmushXGap(): it measures how far two
  vertically adjacent images may overlap via transparent rows.
*/
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  const Quantum
    *p;

  ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    /* Scan bottom-to-top for the first non-transparent pixel in this column. */
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(top_image,p) != TransparentAlpha) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(bottom_image,p) != TransparentAlpha) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}

MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag  "Smush/Image"

  const Image
    *image;

  Image
    *smush_image;

  MagickBooleanType
    proceed,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute maximum area of smushed area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  alpha_trait=image->alpha_trait;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        /* Stacked: width is the widest image, heights (plus offsets) sum. */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    /* Side-by-side: widths (plus offsets) sum, height is the tallest image. */
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse)
    {
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->alpha_trait=alpha_trait;
  (void) SetImageBackgroundColor(smush_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      {
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    /*
      NOTE(review): status is overwritten on each iteration, so only the last
      CompositeImage() result is reported -- confirm this is intentional.
    */
    status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset,
      y_offset,exception);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* Trim the canvas to the extent actually consumed by the composites. */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}

/*
  StripImage() strips an image of all profiles and comments.

  The format of the StripImage method is:

      MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.

    o exception: return any errors or warnings in this structure.
*/
MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  magick_unreferenced(exception);
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  /* Ask the PNG encoder to omit ancillary chunks that would leak metadata. */
  status=SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date");
  return(status);
}

/*
  SyncImage() initializes the red, green, and blue intensities of each pixel
  as defined by the colormap index.

  The format of the SyncImage method is:

      MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.

    o exception: return any errors or warnings in this structure.
*/

/*
  PushColormapIndex() validates a colormap index; out-of-range indexes are
  clamped to 0 and flagged through range_exception.
*/
static inline Quantum PushColormapIndex(Image *image,const Quantum index,
  MagickBooleanType *range_exception)
{
  if ((size_t) index < image->colors)
    return(index);
  *range_exception=MagickTrue;
  return((Quantum) 0);
}

MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    range_exception,
    status,
    taint;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->ping != MagickFalse)
    return(MagickTrue);
  /* Only colormapped (PseudoClass) images have indexes to synchronize. */
  if (image->storage_class != PseudoClass)
    return(MagickFalse);
  assert(image->colormap != (PixelInfo *) NULL);
  range_exception=MagickFalse;
  status=MagickTrue;
  taint=image->taint;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(range_exception,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      index;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* Restore the taint flag: syncing pixels does not count as a user edit. */
  image->taint=taint;
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(exception,GetMagickModule(),
      CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}

/*
  SyncImageSettings() syncs any image_info global options into per-image
  attributes.

  Note: in IMv6 free form 'options' were always mapped into 'artifacts', so
  that operations and coders can find such settings.  In IMv7 if a desired
  per-image artifact is not set, then it will directly look for a global
  option as a fallback, as such this copy is no longer needed, only the
  link set up.

  The format of the SyncImageSettings method is:

      MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
        Image *image,ExceptionInfo *exception)
      MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
        Image *image,ExceptionInfo *exception)

  A description of each parameter follows:

    o image_info: the image info.

    o image: the image.

    o exception: return any errors or warnings in this structure.
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images,ExceptionInfo *exception)
{
  Image
    *image;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  /* Apply the global settings to every image in the list. */
  image=images;
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
    (void) SyncImageSettings(image_info,image,exception);
  /* The page option applies once per sequence; drop it after syncing. */
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}

MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const char
    *option;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  ResolutionType
    units;

  /*
    Sync image options.
*/
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  option=GetImageOption(image_info,"background");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->background_color,
      exception);
  option=GetImageOption(image_info,"black-point-compensation");
  if (option != (const char *) NULL)
    image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
      MagickBooleanOptions,MagickFalse,option);
  option=GetImageOption(image_info,"blue-primary");
  if (option != (const char *) NULL)
    {
      /* rho is x; y defaults to x unless sigma is also given. */
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->chromaticity.blue_primary.x=geometry_info.rho;
      image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
      if ((flags & SigmaValue) != 0)
        image->chromaticity.blue_primary.y=geometry_info.sigma;
    }
  option=GetImageOption(image_info,"bordercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->border_color,
      exception);
  /* FUTURE: do not sync compose to per-image compose setting here */
  option=GetImageOption(image_info,"compose");
  if (option != (const char *) NULL)
    image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
      MagickFalse,option);
  /* -- */
  option=GetImageOption(image_info,"compress");
  if (option != (const char *) NULL)
    image->compression=(CompressionType) ParseCommandOption(
      MagickCompressOptions,MagickFalse,option);
  option=GetImageOption(image_info,"debug");
  if (option != (const char *) NULL)
    image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"density");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->resolution.x=geometry_info.rho;
      image->resolution.y=image->resolution.x;
      if ((flags & SigmaValue) != 0)
        image->resolution.y=geometry_info.sigma;
    }
  option=GetImageOption(image_info,"depth");
  if (option != (const char *) NULL)
    image->depth=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"endian");
  if (option != (const char *) NULL)
    image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"filter");
  if (option != (const char *) NULL)
    image->filter=(FilterType) ParseCommandOption(MagickFilterOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"fuzz");
  if (option != (const char *) NULL)
    image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"gravity");
  if (option != (const char *) NULL)
    image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"green-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->chromaticity.green_primary.x=geometry_info.rho;
      image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
      if ((flags & SigmaValue) != 0)
        image->chromaticity.green_primary.y=geometry_info.sigma;
    }
  option=GetImageOption(image_info,"intent");
  if (option != (const char *) NULL)
    image->rendering_intent=(RenderingIntent) ParseCommandOption(
      MagickIntentOptions,MagickFalse,option);
  option=GetImageOption(image_info,"intensity");
  if (option != (const char *) NULL)
    image->intensity=(PixelIntensityMethod) ParseCommandOption(
      MagickPixelIntensityOptions,MagickFalse,option);
  option=GetImageOption(image_info,"interlace");
  if (option != (const char *) NULL)
    image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"interpolate");
  if (option != (const char *) NULL)
    image->interpolate=(PixelInterpolateMethod) ParseCommandOption(
      MagickInterpolateOptions,MagickFalse,option);
  option=GetImageOption(image_info,"loop");
  if (option != (const char *) NULL)
    image->iterations=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"mattecolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->matte_color,
      exception);
  option=GetImageOption(image_info,"orient");
  if (option != (const char *) NULL)
    image->orientation=(OrientationType) ParseCommandOption(
      MagickOrientationOptions,MagickFalse,option);
  option=GetImageOption(image_info,"page");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      /* NOTE(review): ParseAbsoluteGeometry result is unused here. */
      flags=ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"quality");
  if (option != (const char *) NULL)
    image->quality=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"red-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->chromaticity.red_primary.x=geometry_info.rho;
      image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
      if ((flags & SigmaValue) != 0)
        image->chromaticity.red_primary.y=geometry_info.sigma;
    }
  /* An explicit image_info quality overrides the "quality" option. */
  if (image_info->quality != UndefinedCompressionQuality)
    image->quality=image_info->quality;
  option=GetImageOption(image_info,"scene");
  if (option != (const char *) NULL)
    image->scene=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"taint");
  if (option != (const char *) NULL)
    image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"tile-offset");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"transparent-color");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->transparent_color,
      exception);
  option=GetImageOption(image_info,"type");
  if (option != (const char *) NULL)
    image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
      option);
  option=GetImageOption(image_info,"units");
  units=image_info->units;
  if (option != (const char *) NULL)
    units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
      MagickFalse,option);
  if (units != UndefinedResolution)
    {
      /*
        Convert the stored resolution when the unit system changes
        (1 inch == 2.54 cm); then reapply any explicit density option.
      */
      if (image->units != units)
        switch (image->units)
        {
          case PixelsPerInchResolution:
          {
            if (units == PixelsPerCentimeterResolution)
              {
                image->resolution.x/=2.54;
                image->resolution.y/=2.54;
              }
            break;
          }
          case PixelsPerCentimeterResolution:
          {
            if (units == PixelsPerInchResolution)
              {
                image->resolution.x=(double) ((size_t) (100.0*2.54*
                  image->resolution.x+0.5))/100.0;
                image->resolution.y=(double) ((size_t) (100.0*2.54*
                  image->resolution.y+0.5))/100.0;
              }
            break;
          }
          default:
            break;
        }
      image->units=units;
      option=GetImageOption(image_info,"density");
      if (option != (const char *) NULL)
        {
          flags=ParseGeometry(option,&geometry_info);
          if ((flags & RhoValue) != 0)
            image->resolution.x=geometry_info.rho;
          image->resolution.y=image->resolution.x;
          if ((flags & SigmaValue) != 0)
            image->resolution.y=geometry_info.sigma;
        }
    }
  option=GetImageOption(image_info,"virtual-pixel");
  if (option != (const char *) NULL)
    (void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod)
      ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option),
      exception);
  option=GetImageOption(image_info,"white-point");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->chromaticity.white_point.x=geometry_info.rho;
      image->chromaticity.white_point.y=image->chromaticity.white_point.x;
      if ((flags & SigmaValue) != 0)
        image->chromaticity.white_point.y=geometry_info.sigma;
    }
  /*
    Pointer to allow the lookup of pre-image artifact will fallback to a
    global option setting/define.  This saves a lot of duplication of global
    options into per-image artifacts, while ensuring only specifically set
    per-image artifacts are preserved when parenthesis ends.
  */
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  image->image_info=CloneImageInfo(image_info);
  return(MagickTrue);
}
rawMD4_fmt_plug.c
/* * This file is part of John the Ripper password cracker, * Copyright (c) 2010 by Solar Designer * Copyright (c) 2011, 2012 by magnum * * Use of Bartavelle's mmx/sse2/intrinsics and reduced binary size by * magnum in 2011-2012. * * OMP added May 2013, JimF * BE SIMD logic added 2017, JimF */ #if FMT_EXTERNS_H extern struct fmt_main fmt_rawMD4; #elif FMT_REGISTERS_H john_register_one(&fmt_rawMD4); #else #include <string.h> #include "arch.h" #include "md4.h" #include "common.h" #include "johnswap.h" #include "formats.h" #if !FAST_FORMATS_OMP #undef _OPENMP #endif //#undef SIMD_COEF_32 //#undef SIMD_PARA_MD4 /* * Only effective for SIMD. * Undef to disable reversing steps for benchmarking. */ #define REVERSE_STEPS #ifdef _OPENMP #ifdef SIMD_COEF_32 #ifndef OMP_SCALE #define OMP_SCALE 1024 #endif #else #ifndef OMP_SCALE #define OMP_SCALE 2048 #endif #endif #include <omp.h> #endif #include "simd-intrinsics.h" #include "memdbg.h" #define FORMAT_LABEL "Raw-MD4" #define FORMAT_NAME "" #define ALGORITHM_NAME "MD4 " MD4_ALGORITHM_NAME #ifdef SIMD_COEF_32 #define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD4) #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #ifndef MD4_BUF_SIZ #define MD4_BUF_SIZ 16 #endif #define CIPHERTEXT_LENGTH 32 #define DIGEST_SIZE 16 #define BINARY_SIZE DIGEST_SIZE #define BINARY_ALIGN 4 #define SALT_SIZE 0 #define SALT_ALIGN 1 #define FORMAT_TAG "$MD4$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) static struct fmt_tests tests[] = { {"8a9d093f14f8701df17732b2bb182c74", "password"}, {FORMAT_TAG "6d78785c44ea8dfa178748b245d8c3ae", "magnum" }, {"6d78785c44ea8dfa178748b245d8c3ae", "magnum" }, {"6D78785C44EA8DFA178748B245D8C3AE", "magnum" }, {FORMAT_TAG "31d6cfe0d16ae931b73c59d7e0c089c0", "" }, {FORMAT_TAG "934eb897904769085af8101ad9dabca2", "John the ripper" }, {FORMAT_TAG "cafbb81fb64d9dd286bc851c4c6e0d21", "lolcode" }, {FORMAT_TAG "585028aa0f794af812ee3be8804eb14a", "123456" }, {FORMAT_TAG "23580e2a459f7ea40f9efa148b63cafb", "12345" }, 
{FORMAT_TAG "2ae523785d0caf4d2fb557c12016185c", "123456789" }, {FORMAT_TAG "f3e80e83b29b778bc092bf8a7c6907fe", "iloveyou" }, {FORMAT_TAG "4d10a268a303379f224d8852f2d13f11", "princess" }, {FORMAT_TAG "bf75555ca19051f694224f2f5e0b219d", "1234567" }, {FORMAT_TAG "41f92cf74e3d2c3ba79183629a929915", "rockyou" }, {FORMAT_TAG "012d73e0fab8d26e0f4d65e36077511e", "12345678" }, {FORMAT_TAG "0ceb1fd260c35bd50005341532748de6", "abc123" }, {"8be1ec697b14ad3a53b371436120641d", "1"}, {"114c5a33b8d4127fbe492bd6583aeb4d", "12"}, {"c58cda49f00748a3bc0fcfa511d516cb", "123"}, {"f375f401ddc698af533f16f8ac1e91c1", "1234"}, {NULL} }; #ifdef SIMD_COEF_32 #define PLAINTEXT_LENGTH 55 #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS #include "common-simd-getpos.h" #else #define PLAINTEXT_LENGTH 125 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #ifdef SIMD_COEF_32 static uint32_t (*saved_key)[MD4_BUF_SIZ*NBKEYS]; static uint32_t (*crypt_key)[DIGEST_SIZE/4*NBKEYS]; #else static int (*saved_len); static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_key)[4]; #endif static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif #ifndef SIMD_COEF_32 saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key)); #else saved_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS, sizeof(*saved_key), MEM_ALIGN_SIMD); crypt_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS, sizeof(*crypt_key), MEM_ALIGN_SIMD); #endif } static void done(void) { MEM_FREE(crypt_key); MEM_FREE(saved_key); #ifndef SIMD_COEF_32 MEM_FREE(saved_len); #endif } static int valid(char *ciphertext, struct fmt_main *self) { char *p, *q; p = ciphertext; if 
(!strncmp(p, FORMAT_TAG, TAG_LENGTH)) p += TAG_LENGTH; q = p; while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; return !*q && q - p == CIPHERTEXT_LENGTH; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1]; if (ciphertext[0] == '$' && !strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) ciphertext += TAG_LENGTH; memcpy(out, FORMAT_TAG, TAG_LENGTH); memcpylwr(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1); return out; } static void *get_binary(char *ciphertext) { static union { unsigned long dummy; unsigned int i[DIGEST_SIZE/sizeof(unsigned int)]; } _out; unsigned int *out = _out.i; unsigned int i; unsigned int temp; ciphertext += TAG_LENGTH; for (i=0; i<4; i++) { temp = ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+0])]))<<4; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+1])])); temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+2])]))<<12; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+3])]))<<8; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+4])]))<<20; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+5])]))<<16; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+6])]))<<28; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+7])]))<<24; #if ARCH_LITTLE_ENDIAN || defined(SIMD_COEF_32) out[i] = temp; #else out[i] = JOHNSWAP(temp); #endif } #if defined(SIMD_COEF_32) && defined(REVERSE_STEPS) md4_reverse(out); #endif return out; } static char *source(char *source, void *binary) { static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1] = FORMAT_TAG; uint32_t b[4]; char *p; int i, j; memcpy(b, binary, sizeof(b)); #if SIMD_COEF_32 && defined(REVERSE_STEPS) md4_unreverse(b); #endif #if !ARCH_LITTLE_ENDIAN && !defined(SIMD_COEF_32) alter_endianity(b, 16); #endif p = &out[TAG_LENGTH]; for (i = 0; i < 4; i++) for (j = 0; j < 8; j++) *p++ = itoa16[(b[i] >> ((j ^ 1) * 4)) & 0xf]; return out; } #define NON_SIMD_SET_SAVED_LEN #include 
"common-simd-setkey32.h" #ifndef REVERSE_STEPS #undef SSEi_REVERSE_STEPS #define SSEi_REVERSE_STEPS 0 #endif static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP int loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT; #pragma omp parallel for for (index = 0; index < loops; index++) #endif { #if SIMD_COEF_32 SIMDmd4body(saved_key[index], crypt_key[index], NULL, SSEi_REVERSE_STEPS | SSEi_MIXED_IN); #else MD4_CTX ctx; MD4_Init(&ctx); MD4_Update(&ctx, saved_key[index], saved_len[index]); MD4_Final((unsigned char *)crypt_key[index], &ctx); #endif } return count; } static int cmp_all(void *binary, int count) { #ifdef SIMD_COEF_32 unsigned int x, y; #ifdef _OPENMP const unsigned int c = (count + SIMD_COEF_32 - 1) / SIMD_COEF_32; #else const unsigned int c = SIMD_PARA_MD4; #endif for (y = 0; y < c; y++) for (x = 0; x < SIMD_COEF_32; x++) { if ( ((uint32_t*)binary)[1] == ((uint32_t*)crypt_key)[y*SIMD_COEF_32*4+x+SIMD_COEF_32] ) return 1; } return 0; #else unsigned int index = 0; #ifdef _OPENMP for (index = 0; index < count; index++) #endif if (!memcmp(binary, crypt_key[index], BINARY_SIZE)) return 1; return 0; #endif } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_32 unsigned int x = index&(SIMD_COEF_32-1); unsigned int y = (unsigned int)index/SIMD_COEF_32; return ((uint32_t*)binary)[1] == ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4+SIMD_COEF_32]; #else return !memcmp(binary, crypt_key, BINARY_SIZE); #endif } static int cmp_exact(char *source, int index) { #ifdef SIMD_COEF_32 uint32_t crypt_key[DIGEST_SIZE / 4]; MD4_CTX ctx; char *key = get_key(index); MD4_Init(&ctx); MD4_Update(&ctx, key, strlen(key)); MD4_Final((void*)crypt_key, &ctx); #if !ARCH_LITTLE_ENDIAN alter_endianity(crypt_key, 16); #endif #if defined(REVERSE_STEPS) md4_reverse(crypt_key); #endif return !memcmp(get_binary(source), crypt_key, DIGEST_SIZE); #else return 1; #endif } #ifdef SIMD_COEF_32 #define SIMD_INDEX 
(index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*SIMD_COEF_32*4+SIMD_COEF_32 static int get_hash_0(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_0; } static int get_hash_1(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_1; } static int get_hash_2(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_2; } static int get_hash_3(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_3; } static int get_hash_4(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_4; } static int get_hash_5(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_5; } static int get_hash_6(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_6; } #else static int get_hash_0(int index) { return ((uint32_t*)crypt_key)[1] & PH_MASK_0; } static int get_hash_1(int index) { return ((uint32_t*)crypt_key)[1] & PH_MASK_1; } static int get_hash_2(int index) { return ((uint32_t*)crypt_key)[1] & PH_MASK_2; } static int get_hash_3(int index) { return ((uint32_t*)crypt_key)[1] & PH_MASK_3; } static int get_hash_4(int index) { return ((uint32_t*)crypt_key)[1] & PH_MASK_4; } static int get_hash_5(int index) { return ((uint32_t*)crypt_key)[1] & PH_MASK_5; } static int get_hash_6(int index) { return ((uint32_t*)crypt_key)[1] & PH_MASK_6; } #endif static int binary_hash_0(void * binary) { return ((uint32_t*)binary)[1] & PH_MASK_0; } static int binary_hash_1(void * binary) { return ((uint32_t*)binary)[1] & PH_MASK_1; } static int binary_hash_2(void * binary) { return ((uint32_t*)binary)[1] & PH_MASK_2; } static int binary_hash_3(void * binary) { return ((uint32_t*)binary)[1] & PH_MASK_3; } static int binary_hash_4(void * binary) { return ((uint32_t*)binary)[1] & PH_MASK_4; } static int binary_hash_5(void * binary) { return ((uint32_t*)binary)[1] & PH_MASK_5; } static int binary_hash_6(void * binary) { return ((uint32_t*)binary)[1] & PH_MASK_6; } struct fmt_main fmt_rawMD4 = { { FORMAT_LABEL, FORMAT_NAME, 
ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, get_binary, fmt_default_salt, { NULL }, source, { binary_hash_0, binary_hash_1, binary_hash_2, binary_hash_3, binary_hash_4, binary_hash_5, binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
GB_unaryop__abs_int8_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__abs_int8_uint32
// op(A') function: GB_tran__abs_int8_uint32

// C type: int8_t
// A type: uint32_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IABS (aij)

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
// NOTE(review): aij is unsigned (uint32_t) here, so GB_IABS is presumably
// an identity before the (int8_t) cast truncates — confirm against the
// definition of GB_IABS.
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, aij) \
    int8_t z = (int8_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT8 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise pass over the anz entries of Ax, parallelized statically
// over nthreads.  Each entry p is read and written once at the same
// position, which is why Cx and Ax may alias.
GrB_Info GB_unop__abs_int8_uint32
(
    int8_t *Cx,         // Cx and Ax may be aliased
    uint32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unaryop_transpose.c and is driven by the
// macros defined above (this is phase 2 of 2: the numerical work).
GrB_Info GB_tran__abs_int8_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
mixed_tentusscher_myo_epi_2004_S2_17.c
// Scenario 2 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt + Rc)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S2_17.h"

// Report the model's initial membrane voltage and equation count to the
// caller (both optional, selected by the get_* flags).
GET_CELL_MODEL_DATA(init_cell_model_data)
{
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

// Load steady-state initial conditions into sv for one cell.  The
// per-cell mapping array (passed via extra_data) selects myocardium
// (mapping[sv_id] == 0) or epicardium initial conditions.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
    static bool first_call = true;

    if(first_call)
    {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0)
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;  // V; millivolt
        sv[1] = 0.f;        // M
        sv[2] = 0.75;       // H
        sv[3] = 0.75f;      // J
        sv[4] = 0.f;        // Xr1
        sv[5] = 1.f;        // Xr2
        sv[6] = 0.f;        // Xs
        sv[7] = 1.f;        // S
        sv[8] = 0.f;        // R
        sv[9] = 0.f;        // D
        sv[10] = 1.f;       // F
        sv[11] = 1.f;       // FCa
        sv[12] = 1.f;       // G
        sv[13] = 0.0002;    // Cai
        sv[14] = 0.2f;      // CaSR
        sv[15] = 11.6f;     // Nai
        sv[16] = 138.3f;    // Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;  // V; millivolt
        sv[1] = 0.f;        // M
        sv[2] = 0.75;       // H
        sv[3] = 0.75f;      // J
        sv[4] = 0.f;        // Xr1
        sv[5] = 1.f;        // Xr2
        sv[6] = 0.f;        // Xs
        sv[7] = 1.f;        // S
        sv[8] = 0.f;        // R
        sv[9] = 0.f;        // D
        sv[10] = 1.f;       // F
        sv[11] = 1.f;       // FCa
        sv[12] = 1.f;       // G
        sv[13] = 0.0002;    // Cai
        sv[14] = 0.2f;      // CaSR
        sv[15] = 11.6f;     // Nai
        sv[16] = 138.3f;    // Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.5413847249581,0.00129749333274554,0.779049320216350,0.778898229736094,0.000175386577749891,0.484810494199284,0.00294593233512741,0.999998339142758,1.94217050104558e-08,1.89785399117775e-05,0.999772756079176,1.00727534190360,0.999997440785955,4.09273550037733e-05,0.410743063693995,10.9424848182514,138.731054625637};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
}

// Advance num_steps time steps of size dt for every cell listed in
// cells_to_solve (or all cells when it is NULL), dispatching each cell to
// the myocardial or epicardial RHS according to the mapping array.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

#pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        // NOTE(review): the cell type is read as mapping[i] while the
        // state is addressed by sv_id = cells_to_solve[i]; when
        // cells_to_solve is non-NULL these indices can differ — confirm
        // whether mapping should be indexed by sv_id instead.
        for (int j = 0; j < num_steps; ++j)
        {
            if (mapping[i] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One explicit step of the myocardial model: RHS_cpu_myo returns the NEW
// state values in rDY (not derivatives), which are copied back into sv.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_myo(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 myocardial cell: computes all membrane currents and
// advances the state by one step of size dt.  Despite the name, rDY_
// receives the UPDATED state, already advanced by dt: gate variables use
// the exponential update x_inf - (x_inf - x)*exp(-dt/tau_x) (Rush-Larsen
// style), ion concentrations are updated in place, and
// rDY_[0] = V + dt*(-Itot) is the forward-Euler voltage step.
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3];
    real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7];
    real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11];
    real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14];
    real Nai = sv[15]; real Ki = sv[16];

    //External concentrations
    real Ko=5.4; real Cao=2.0; real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404; real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f;
    real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f;

    //Constants
    const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0; real KmNa=40.0; real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825; real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL;
    real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel;
    real Ileak; real dNai; real dKi; real dCai; real dCaSR;
    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA; real Caisquare; real CaSRsquare; real CaCurrent;
    real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks;
    real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc;
    real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK;
    real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2;
    real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF;
    real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1;
    real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1;
    real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs;
    real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd;
    real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF;
    real FCa_INF; real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    // quadratic solve for buffered SR calcium
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;
    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;
    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;
    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (exponential / Rush-Larsen-style updates)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g gates may only move toward their steady state while the
    // cell is depolarized above -37 mV (one-way latch, per TT2004)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}

// One explicit step of the epicardial model: RHS_cpu_epi returns the NEW
// state values in rDY (not derivatives), which are copied back into sv.
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_epi(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 epicardial cell.  Same integration scheme as
// RHS_cpu_myo; differs in that the conductances and the SR release/leak
// parameters (arel, crel, Vleak) are overwritten from a fitted
// `parameters` array (Scenario 2 calibration) instead of the paper
// defaults declared above it.
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3];
    real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7];
    real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11];
    real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14];
    real Nai = sv[15]; real Ki = sv[16];

    //External concentrations
    real Ko=5.4; real Cao=2.0; real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404; real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f;
    real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f;

    //Constants
    const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Epicardium cell
    real Gks=0.245;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Epicardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0; real KmNa=40.0; real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825; real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Scenario-2 fitted parameter set; overrides the defaults above
    real parameters []={14.6526831901002,0.000336603613824894,0.000142032316714142,0.000147797037794095,0.244877435259635,0.136552852378623,0.180909422982719,4.68260453463487,0.0136308755837635,1.00097696778612,1088.15434244063,0.000484016332794955,0.441709817218134,0.0199531034368028,0.00354996431590630,4.97623621373625e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL;
    real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel;
    real Ileak; real dNai; real dKi; real dCai; real dCaSR;
    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA; real Caisquare; real CaSRsquare; real CaCurrent;
    real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks;
    real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc;
    real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK;
    real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2;
    real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF;
    real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1;
    real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1;
    real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs;
    real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd;
    real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF;
    real FCa_INF; real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    // SR release/leak use the fitted arel/crel/Vleak here (vs. fixed
    // constants in the myocardial RHS)
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;
    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;
    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (exponential / Rush-Larsen-style updates)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g gates may only move toward their steady state while the
    // cell is depolarized above -37 mV (one-way latch, per TT2004)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
thbasic.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <limits.h> #include "thnets.h" #define THAtomicIncrement(a) __sync_fetch_and_add(a, 1); #define THAtomicDecrement(a) __sync_fetch_and_add(a, -1); THFloatStorage *THFloatStorage_new(long size) { THFloatStorage *s = malloc(sizeof(*s)); s->data = malloc(sizeof(*s->data) * size); s->nref = 1; s->mustfree = 1; return s; } THFloatStorage *THFloatStorage_newwithbuffer(void *buffer) { THFloatStorage *s = malloc(sizeof(*s)); s->data = buffer; s->nref = 1; s->mustfree = 0; return s; } void THFloatStorage_free(THFloatStorage *s) { THAtomicDecrement(&s->nref); if(s->nref == 0) { #ifdef CUDNN if(s->mustfree == 2) cudaFree(s->data); else #endif if(s->mustfree) free(s->data); free(s); } } void THFloatTensor_resize(THFloatTensor *t, long *size, int nDimension) { int i; long stride = 1; t->nDimension = nDimension; memcpy(t->size, size, nDimension * sizeof(*t->size)); for(i = nDimension - 1; i >= 0; i--) { t->stride[i] = stride; stride *= t->size[i]; } if(!t->storage) t->storage = THFloatStorage_new(stride); } void THFloatTensor_resize4d(THFloatTensor *t, long size0, long size1, long size2, long size3) { t->nDimension = 4; t->size[0] = size0; t->size[1] = size1; t->size[2] = size2; t->size[3] = size3; t->stride[3] = 1; t->stride[2] = size3; t->stride[1] = size2 * size3; t->stride[0] = size1 * size2 * size3; if(!t->storage) t->storage = THFloatStorage_new(size0 * size1 * size2 * size3); } void THFloatTensor_resize3d(THFloatTensor *t, long size0, long size1, long size2) { t->nDimension = 3; t->size[0] = size0; t->size[1] = size1; t->size[2] = size2; t->stride[2] = 1; t->stride[1] = size2; t->stride[0] = size1 * size2; if(!t->storage) t->storage = THFloatStorage_new(size0 * size1 * size2); } void THFloatTensor_resize2d(THFloatTensor *t, long size0, long size1) { t->nDimension = 2; t->size[0] = size0; t->size[1] = size1; t->stride[1] = 1; t->stride[0] = size1; if(!t->storage) t->storage = 
THFloatStorage_new(size0 * size1); } void THFloatTensor_resize1d(THFloatTensor *t, long size0) { t->nDimension = 1; t->size[0] = size0; t->stride[0] = 1; if(!t->storage) t->storage = THFloatStorage_new(size0); } void THError(const char *fmt, ...) { va_list ap; va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); fprintf(stderr, "\n"); exit(-1); } void THFloatTensor_free(THFloatTensor *t) { if(!t) return; if(t->storage) THFloatStorage_free(t->storage); free(t); } THFloatTensor *THFloatTensor_newSelect(THFloatTensor *tensor, int dimension, long sliceIndex) { if(dimension) THError("THFloatTensor_newSelect not implemented for dimension != 0"); THFloatTensor *t = malloc(sizeof(*t)); t->nDimension = tensor->nDimension - 1; t->size[0] = tensor->size[1]; t->size[1] = tensor->size[2]; t->size[2] = tensor->size[3]; t->stride[0] = tensor->stride[1]; t->stride[1] = tensor->stride[2]; t->stride[2] = tensor->stride[3]; t->storage = tensor->storage; THAtomicIncrement(&t->storage->nref); t->storageOffset = sliceIndex * tensor->stride[0]; return t; } long THFloatTensor_nElement(THFloatTensor *t) { if(t->nDimension == 0) return 0; else { long nElement = 1; int i; for(i = 0; i < t->nDimension; i++) nElement *= t->size[i]; return nElement; } } void THFloatTensor_resizeAs(THFloatTensor *tdst, THFloatTensor *tsrc) { if(tsrc == tdst) return; long nelemsrc = THFloatTensor_nElement(tsrc); tdst->nDimension = tsrc->nDimension; memcpy(tdst->size, tsrc->size, sizeof(tsrc->size)); memcpy(tdst->stride, tsrc->stride, sizeof(tsrc->stride)); if(!tdst->storage) tdst->storage = THFloatStorage_new(nelemsrc); else if(nelemsrc != THFloatTensor_nElement(tdst)) { if(tdst->storage) tdst->storage->data = realloc(tdst->storage->data, sizeof(*tdst->storage->data) * nelemsrc); else tdst->storage = THFloatStorage_new(nelemsrc); } } void THFloatTensor_set(THFloatTensor *tdst, THFloatTensor *tsrc) { if(tsrc == tdst) return; if(tdst->storage) THFloatStorage_free(tdst->storage); *tdst = *tsrc; 
THAtomicIncrement(&tsrc->storage->nref); } float *THFloatTensor_data(THFloatTensor *tensor) { return tensor->storage->data + tensor->storageOffset; } THFloatTensor *THFloatTensor_new() { return calloc(1, sizeof(THFloatTensor)); } THFloatTensor *THFloatTensor_newWithStorage3d(THFloatStorage *storage, long storageOffset, long size0, long stride0, long size1, long stride1, long size2, long stride2) { THFloatTensor *t = THFloatTensor_new(); t->nDimension = 3; t->size[0] = size0; t->size[1] = size1; t->size[2] = size2; t->stride[0] = stride0 == -1 ? size1 * size2 : stride0; t->stride[1] = stride1 == -1 ? size2 : stride1; t->stride[2] = stride2 == -1 ? 1 : stride2; t->storage = storage; t->storageOffset = storageOffset; THAtomicIncrement(&t->storage->nref); return t; } THFloatTensor *THFloatTensor_newWithStorage2d(THFloatStorage *storage, long storageOffset, long size0, long stride0, long size1, long stride1) { THFloatTensor *t = THFloatTensor_new(); t->nDimension = 2; t->size[0] = size0; t->size[1] = size1; t->stride[0] = stride0 == -1 ? size1 : stride0; t->stride[1] = stride1 == -1 ? 1 : stride1; t->storage = storage; t->storageOffset = storageOffset; THAtomicIncrement(&t->storage->nref); return t; } THFloatTensor *THFloatTensor_newWithStorage1d(THFloatStorage *storage, long storageOffset, long size0, long stride0) { THFloatTensor *t = THFloatTensor_new(); t->nDimension = 1; t->size[0] = size0; t->stride[0] = stride0 == -1 ? 
1 : stride0; t->storage = storage; t->storageOffset = storageOffset; THAtomicIncrement(&t->storage->nref); return t; } THFloatTensor *THFloatTensor_newWithTensor(THFloatTensor *tensor) { THFloatTensor *self = THFloatTensor_new(); THFloatTensor_set(self, tensor); return self; } void THFloatTensor_zero(THFloatTensor *t) { memset(t->storage->data, 0, THFloatTensor_nElement(t) * sizeof(*t->storage->data)); } void THFloatTensor_fill(THFloatTensor *t, float value) { THFloatVector_fill(t->storage->data, value, THFloatTensor_nElement(t)); } void THFloatTensor_copy(THFloatTensor *tdst, THFloatTensor *tsrc) { memcpy(tdst->storage->data, tsrc->storage->data, sizeof(*tdst->storage->data) * THFloatTensor_nElement(tsrc)); } void THFloatTensor_transpose(THFloatTensor *tdst, THFloatTensor *tsrc, int dimension1, int dimension2) { long z; if(!tsrc) tsrc = tdst; THFloatTensor_set(tdst, tsrc); if(dimension1 == dimension2) return; z = tdst->stride[dimension1]; tdst->stride[dimension1] = tdst->stride[dimension2]; tdst->stride[dimension2] = z; z = tdst->size[dimension1]; tdst->size[dimension1] = tdst->size[dimension2]; tdst->size[dimension2] = z; } THFloatTensor *THFloatTensor_newTranspose(THFloatTensor *tensor, int dimension1_, int dimension2_) { THFloatTensor *self = THFloatTensor_newWithTensor(tensor); THFloatTensor_transpose(self, NULL, dimension1_, dimension2_); return self; } double THExpMinusApprox(double x) { #if EXACT_EXPONENTIAL return exp(-x); #else /* fast approximation of exp(-x) for x positive */ # define A0 (1.0) # define A1 (0.125) # define A2 (0.0078125) # define A3 (0.00032552083) # define A4 (1.0172526e-5) if (x < 13.0) { /* assert(x>=0); */ double y; y = A0+x*(A1+x*(A2+x*(A3+x*A4))); y *= y; y *= y; y *= y; y = 1/y; return y; } return 0; # undef A0 # undef A1 # undef A2 # undef A3 # undef A4 #endif } extern void sgemm_(char *transa, char *transb, int *m, int *n, int *k, float *alpha, float *a, int *lda, float *b, int *ldb, float *beta, float *c, int *ldc); static void 
THBlas_gemm(char transa, char transb, long m, long n, long k, float alpha, float *a, long lda, float *b, long ldb, float beta, float *c, long ldc) { int transa_ = ((transa == 't') || (transa == 'T')); int transb_ = ((transb == 't') || (transb == 'T')); if(n == 1) ldc = m; if(transa_) { if(m == 1) lda = k; } else { if(k == 1) lda = m; } if(transb_) { if(k == 1) ldb = n; } else { if(n == 1) ldb = k; } if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_k = (int)k; int i_lda = (int)lda; int i_ldb = (int)ldb; int i_ldc = (int)ldc; sgemm_(&transa, &transb, &i_m, &i_n, &i_k, &alpha, a, &i_lda, b, &i_ldb, &beta, c, &i_ldc); return; } THError("Wrong parameters to gemm"); } void sgemv_(char *trans, int *m, int *n, float *alpha, float *a, int *lda, float *x, int *incx, float *beta, float *y, int *incy); void THBlas_gemv(char trans, long m, long n, float alpha, float *a, long lda, float *x, long incx, float beta, float *y, long incy) { if(n == 1) lda = m; if( (m <= INT_MAX) && (n <= INT_MAX) && (lda > 0) && (lda <= INT_MAX) && (incx > 0) && (incx <= INT_MAX) && (incy > 0) && (incy <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_lda = (int)lda; int i_incx = (int)incx; int i_incy = (int)incy; sgemv_(&trans, &i_m, &i_n, &alpha, a, &i_lda, x, &i_incx, &beta, y, &i_incy); return; } } void sger_(int *m, int *n, float *alpha, float *x, int *incx, float *y, int *incy, float *a, int *lda); void THBlas_ger(long m, long n, float alpha, float *x, long incx, float *y, long incy, float *a, long lda) { if(n == 1) lda = m; int i_m = (int)m; int i_n = (int)n; int i_lda = (int)lda; int i_incx = (int)incx; int i_incy = (int)incy; sger_(&i_m, &i_n, &alpha, x, &i_incx, y, &i_incy, a, &i_lda); } void THFloatTensor_addmm(THFloatTensor *r_, float beta, THFloatTensor *t, float alpha, THFloatTensor *m1, THFloatTensor *m2) { char transpose_r, transpose_m1, transpose_m2; THFloatTensor 
*r__, *m1_, *m2_; if( (m1->nDimension != 2) || (m2->nDimension != 2)) THError("matrices expected, got %dD, %dD tensors", m1->nDimension, m2->nDimension); if(m1->size[1] != m2->size[0]) THError("size mismatch, m1: %ld, m2: %ld", m1->size[1], m2->size[0]); if( t->nDimension != 2 ) THError("matrix expected, got %dD tensor for t", t->nDimension); if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) ) THError("size mismatch, t: %ld, m1: %ld, t: %ld, m2: %ld", t->size[0], m1->size[1], t->size[1], m2->size[1]); if(t != r_) THError("Not implemented: t != r"); /* printf("%ldx%ld = %ldx%ld X %ldx%ld\n", r_->size[0], r_->size[1], m1->size[0], m1->size[1], m2->size[0], m2->size[1]); */ /* r_ */ if(r_->stride[0] == 1 && r_->stride[1] != 0) { transpose_r = 'n'; r__ = r_; } else if(r_->stride[1] == 1 && r_->stride[0] != 0) { THFloatTensor *swap = m2; m2 = m1; m1 = swap; transpose_r = 't'; r__ = r_; } else { THError("Transpose not implemented (1)"); return; /* transpose_r = 'n'; r__ = THFloatTensor_newWithSize2d(r_->size[1], r_->size[0]); THFloatTensor_copy(r__, r_); THFloatTensor_transpose(r__, NULL, 0, 1);*/ } /* m1 */ if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0) { transpose_m1 = 'n'; m1_ = m1; } else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0) { transpose_m1 = 't'; m1_ = m1; } else { THError("Transpose not implemented (2)"); return; /*transpose_m1 = (transpose_r == 'n' ? 't' : 'n'); m1_ = THFloatTensor_newContiguous(m1);*/ } /* m2 */ if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0) { transpose_m2 = 'n'; m2_ = m2; } else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m2->stride[(transpose_r == 'n' ? 0 : 1)] != 0) { transpose_m2 = 't'; m2_ = m2; } else { THError("Transpose not implemented (3)"); return; /*transpose_m2 = (transpose_r == 'n' ? 
't' : 'n'); m2_ = THFloatTensor_(newContiguous)(m2);*/ } /* do the operation */ THBlas_gemm(transpose_m1, transpose_m2, r__->size[(transpose_r == 'n' ? 0 : 1)], r__->size[(transpose_r == 'n' ? 1 : 0)], m1_->size[(transpose_r == 'n' ? 1 : 0)], alpha, THFloatTensor_data(m1_), (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]), THFloatTensor_data(m2_), (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]), beta, THFloatTensor_data(r__), r__->stride[(transpose_r == 'n' ? 1 : 0)]); /* free intermediate variables */ if(m1_ != m1) THFloatTensor_free(m1_); if(m2_ != m2) THFloatTensor_free(m2_); if(r__ != r_) THError("freeCopyTo not implemented"); /*THFloatTensor_(freeCopyTo)(r__, r_);*/ } void THFloatTensor_addmv(THFloatTensor *r_, float beta, THFloatTensor *t, float alpha, THFloatTensor *mat, THFloatTensor *vec) { if( (mat->nDimension != 2) || (vec->nDimension != 1) ) THError("matrix and vector expected, got %dD, %dD", mat->nDimension, vec->nDimension); if( mat->size[1] != vec->size[0] ) THError("size mismatch, %s, %s", mat->size[1], vec->size[0]); if(t->nDimension != 1) THError("vector expected, got t: %dD", t->nDimension); if(t->size[0] != mat->size[0]) THError("size mismatch, t: %ld, mat: %ld", t->size[0], mat->size[0]); if(r_ != t) THError("r_ != t not implemented"); if(mat->stride[0] == 1) { THBlas_gemv('n', mat->size[0], mat->size[1], alpha, THFloatTensor_data(mat), mat->stride[1], THFloatTensor_data(vec), vec->stride[0], beta, THFloatTensor_data(r_), r_->stride[0]); } else if(mat->stride[1] == 1) { THBlas_gemv('t', mat->size[1], mat->size[0], alpha, THFloatTensor_data(mat), mat->stride[0], THFloatTensor_data(vec), vec->stride[0], beta, THFloatTensor_data(r_), r_->stride[0]); } else THError("addmv for non-contiguous not implemented"); } #define TH_OMP_OVERHEAD_THRESHOLD 100000 void THFloatTensor_mul(THFloatTensor *r_, THFloatTensor *t, float value) { 
float *tp = THFloatTensor_data(t); float *rp = THFloatTensor_data(r_); long i; long sz = THFloatTensor_nElement(t); #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = tp[i] * value; } void THFloatTensor_addr(THFloatTensor *r_, float beta, THFloatTensor *t, float alpha, THFloatTensor *vec1, THFloatTensor *vec2) { if( (vec1->nDimension != 1) || (vec2->nDimension != 1) ) THError("vector and vector expected, got %dD, %dD tensors", vec1->nDimension, vec2->nDimension); if(t->nDimension != 2) THError("expected matrix, got %dD tensor for t", t->nDimension); if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) ) THError("size mismatch, t: %ld, vec1: %ld, t: %ld, vec2: %ld", t->size[0], vec1->size[0], t->size[1], vec2->size[0]); if(r_ != t) THError("r_ != t not implemented"); if(beta != 1) THFloatTensor_mul(r_, r_, beta); if(r_->stride[0] == 1) { THBlas_ger(vec1->size[0], vec2->size[0], alpha, THFloatTensor_data(vec1), vec1->stride[0], THFloatTensor_data(vec2), vec2->stride[0], THFloatTensor_data(r_), r_->stride[1]); } else if(r_->stride[1] == 1) { THBlas_ger(vec2->size[0], vec1->size[0], alpha, THFloatTensor_data(vec2), vec2->stride[0], THFloatTensor_data(vec1), vec1->stride[0], THFloatTensor_data(r_), r_->stride[0]); } else THError("addr for non-contiguous not implemented"); } void printtensor(THFloatTensor *t) { if(t->nDimension == 2) { int i, j; for(i = 0; i < t->size[0]; i++) { printf("%d) ", i); for(j = 0; j < t->size[1]; j++) printf("%f ", t->storage->data[i * t->stride[0] + j]); printf("\n"); } } else printf("printtensor: nDimension not implemented\n"); } void THFloatTensor_validXCorr2Dptr(float *r_, float alpha, float *t_, long ir, long ic, float *k_, long kr, long kc, long sr, long sc) { long or = (ir - kr) / sr + 1; long oc = (ic - kc) / sc + 1; long xx, yy, kx, ky; if ((sc != 1) || (oc < 4)) { /* regular convolution */ for(yy = 0; yy < or; yy++) { for(xx = 0; xx < oc; xx++) { /* Dot product in two 
dimensions... (between input image and the mask) */ float *pi_ = t_ + yy*sr*ic + xx*sc; float *pw_ = k_; float sum = 0; for(ky = 0; ky < kr; ky++) { for(kx = 0; kx < kc; kx++) { sum += pi_[kx]*pw_[kx]; } pi_ += ic; /* next input line */ pw_ += kc; /* next mask line */ } /* Update output */ *r_++ += alpha*sum; } } } else { /* SSE-based convolution */ for(yy = 0; yy < or; yy++) { float *pi_ = t_ + yy*sr*ic; float *pw_ = k_; for (ky = 0; ky < kr; ky++) { float *pis_ = pi_; for (kx = 0; kx < kc; kx++) { THFloatVector_add(r_, pis_, alpha*pw_[kx], oc); pis_++; } pi_ += ic; /* next input line */ pw_ += kc; /* next mask line */ } r_ += oc; } } } void THFloatTensor_conv2Dmv(THFloatTensor *r_, float beta, float alpha, THFloatTensor *t_, THFloatTensor *k_, long srow, long scol, const char *vf, const char *xc) { long nInputPlane, nInputRows, nInputCols; long nKernelRows, nKernelCols; long nOutputPlane, nOutputRows, nOutputCols; long istride0, kstride0, kstride1; THFloatTensor *input; THFloatTensor *kernel; float *input_data; float *weight_data; float *output_data; long nelem; long k; if(t_->nDimension != 3) THError("input: 3D Tensor expected"); if(k_->nDimension != 4) THError("kernel: 4D Tensor expected"); if(srow < 1) THError("Stride should be a positive integer"); if(scol < 1) THError("Stride should be a positive integer"); if(*vf != 'V' || *xc != 'X') THError("Type of convolution can be 'V','X' only"); input = t_; kernel = k_; nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; kstride1 = kernel->stride[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; nOutputPlane = kernel->size[0]; if(kernel->size[1] != nInputPlane) THError("invalid number of input planes"); if(!(nInputRows >= nKernelRows && nInputCols >= nKernelCols)) THError("conv2Dmv : Input image is smaller than kernel"); nOutputRows = (nInputRows - nKernelRows) / srow + 1; nOutputCols = (nInputCols - 
nKernelCols) / scol + 1; nelem = THFloatTensor_nElement(r_); THFloatTensor_resize3d(r_, nOutputPlane, nOutputRows, nOutputCols); input_data = THFloatTensor_data(input); weight_data = THFloatTensor_data(kernel); output_data = THFloatTensor_data(r_); if (nelem == 0 || beta == 0 || nelem != THFloatTensor_nElement(r_)) { /*THFloatTensor_zero)(r_);*/ #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]; k++) { float* ptr_output = output_data + k*nOutputCols*nOutputRows; long l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] = 0.0; } } else if (beta != 1) { /*THFloatTensor_mul)(r_, beta);*/ #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]; k++) { float* ptr_output = output_data + k*nOutputCols*nOutputRows; long l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] *= beta; } } #pragma omp parallel for private(k) for(k = 0; k < nOutputPlane; k++) { long i; /* get output */ float *ptr_output = output_data + k*nOutputCols*nOutputRows; for(i = 0; i < nInputPlane; i++) { /* get kernel */ float *ptr_weight = weight_data + k*kstride0 + i*kstride1; /* get input */ float *ptr_input = input_data + i*istride0; /* do image, kernel convolution */ THFloatTensor_validXCorr2Dptr(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); } } } void THFloatTensor_conv2Dmm(THFloatTensor *r_, float beta, float alpha, THFloatTensor *t_, THFloatTensor *k_, long srow, long scol, const char *vf, const char *xc) { long nInputPlane, nInputRows, nInputCols; long nKernelRows, nKernelCols; long nOutputPlane, nOutputRows, nOutputCols; long kstride0, kstride1; THFloatTensor *input; THFloatTensor* kernel; long nbatch; long nelem; float *input_data; float *weight_data; float *output_data; long p; if(t_->nDimension != 4) THError("input: 3D Tensor expected"); if(k_->nDimension != 4) THError("kernel: 4D Tensor expected"); if(srow < 1) THError("Stride should be a positive integer"); if(scol < 1) THError("Stride 
should be a positive integer"); if(*vf != 'V' || *xc != 'X') THError("Type of convolution can be 'V','X' only"); input = t_; kernel = k_; nbatch = input->size[0]; nInputPlane = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; kstride1 = kernel->stride[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; nOutputPlane = kernel->size[0]; if(kernel->size[1] != nInputPlane) THError("invalid number of input planes"); if(!(nInputRows >= nKernelRows && nInputCols >= nKernelCols)) THError("conv2Dmv : Input image is smaller than kernel"); nOutputRows = (nInputRows - nKernelRows) / srow + 1; nOutputCols = (nInputCols - nKernelCols) / scol + 1; nelem = THFloatTensor_nElement(r_); THFloatTensor_resize4d(r_, nbatch, nOutputPlane, nOutputRows, nOutputCols); input_data = THFloatTensor_data(input); weight_data = THFloatTensor_data(kernel); output_data = THFloatTensor_data(r_); if (nelem == 0 || beta == 0 || nelem != THFloatTensor_nElement(r_)) { /*THFloatTensor_(zero)(r_);*/ #pragma omp parallel for private(p) for (p=0; p < r_->size[0]; p++) { long k; for (k = 0; k < r_->size[1]; k++) { float* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows; long l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] = 0.0; } } } else if (beta != 1) { /*THFloatTensor_(mul)(r_, beta);*/ #pragma omp parallel for private(p) for(p=0; p < r_->size[0]; p++) { long k; for (k = 0; k < r_->size[1]; k++) { float* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows; long l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] *= beta; } } } #pragma omp parallel for private(p) for(p=0; p < nbatch; p++) { long k; for(k = 0; k < nOutputPlane; k++) { long i; /* get output */ float *ptr_output = output_data + p*nOutputPlane*nOutputCols*nOutputRows + k*nOutputCols*nOutputRows; for(i = 0; i < nInputPlane; i++) { /* get kernel */ float *ptr_weight = 
weight_data + k*kstride0 + i*kstride1; /* get input */ float *ptr_input = input_data + p*nInputPlane*nInputRows*nInputCols + i*nInputRows*nInputCols; /* do image, kernel convolution */ THFloatTensor_validXCorr2Dptr(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); } } } }
GB_binop__pair_uint64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this is a generated kernel file for the PAIR operator on
// uint64 (cij = 1, operands ignored).  Function bodies are pulled in via
// #include of shared templates; only the macros below are type-specific.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):        GB (_AaddB__pair_uint64)
// A.*B function (eWiseMult):      GB ((none))
// A.*B function (eWiseMult):      GB ((none))
// A.*B function (eWiseMult):      GB ((none))
// A.*B function (eWiseMult):      GB ((none))
// A*D function (colscale):        GB ((none))
// D*A function (rowscale):        GB ((none))
// C+=B function (dense accum):    GB (_Cdense_accumB__pair_uint64)
// C+=b function (dense accum):    GB (_Cdense_accumb__pair_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3):  GB (_Cdense_ewise3_noaccum__pair_uint64)
// C=scalar+B                      GB ((none))
// C=scalar+B'                     GB ((none))
// C=A+scalar                      GB ((none))
// C=A'+scalar                     GB ((none))

// C type:   uint64_t
// A type:   uint64_t
// B,b type: uint64_t
// BinaryOp: cij = 1

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// (empty: the PAIR operator never reads its operands)
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// bij = Bx [pB]
// (empty: the PAIR operator never reads its operands)
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator (PAIR: result is the constant 1, inputs ignored)
#define GB_BINOP(z,x,y,i,j) \
    z = 1 ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_UINT64 || GxB_NO_PAIR_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__pair_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pair_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pair_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (generator artifact) -- the block above
    // always returns first.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pair_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t   x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t   y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    ; ; \
    Cx [pC] = 1 ; \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    ; ; \
    Cx [pC] = 1 ; \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
Example_tasking.8.c
/*
 * @@name: tasking.8c
 * @@type: C
 * @@compilable: yes
 * @@linkable: no
 * @@expect: success
 * @@version: omp_3.0
 */

/* tp is threadprivate: each thread in the team keeps its own copy. */
int tp;
#pragma omp threadprivate(tp)

/* var is an ordinary global, shared by all threads. */
int var;

/*
 * Demonstrates the interaction of threadprivate storage with tasking.
 * The outer task is tied (the default), so even though creating the
 * nested task is a task scheduling point, the outer task resumes on
 * the same thread afterwards; that thread's threadprivate tp therefore
 * still holds the value written by tp++ below — provided no other task
 * executed on the same thread modifies tp (the nested task here
 * deliberately does not).
 */
void work()
{
#pragma omp parallel
    {
        /* do work here */
#pragma omp task
        {
            tp++; /* increments this thread's private copy of tp */
            /* do work here */
#pragma omp task
            {
                /* do work here but don't modify tp */
            }
            var = tp; /* Value does not change after write above:
                         tied task resumes on the same thread and the
                         nested task leaves tp untouched. */
        }
    }
}
SystemMatrix.h
/***************************************************************************** * * Copyright (c) 2003-2020 by The University of Queensland * http://www.uq.edu.au * * Primary Business: Queensland, Australia * Licensed under the Apache License, version 2.0 * http://www.apache.org/licenses/LICENSE-2.0 * * Development until 2012 by Earth Systems Science Computational Center (ESSCC) * Development 2012-2013 by School of Earth Sciences * Development from 2014-2017 by Centre for Geoscience Computing (GeoComp) * Development from 2019 by School of Earth and Environmental Sciences ** *****************************************************************************/ /****************************************************************************/ /* Paso: SystemMatrix */ /****************************************************************************/ /* Copyrights by ACcESS Australia 2003,2004,2005,2006 */ /* Author: Lutz Gross, l.gross@uq.edu.au */ /****************************************************************************/ #ifndef __PASO_SYSTEMMATRIX_H__ #define __PASO_SYSTEMMATRIX_H__ #include "SparseMatrix.h" #include "SystemMatrixPattern.h" #include <escript/AbstractSystemMatrix.h> namespace paso { struct Options; template <class T> class SystemMatrix; template <typename T> using SystemMatrix_ptr = boost::shared_ptr<SystemMatrix<T> >; template <typename T> using const_SystemMatrix_ptr = boost::shared_ptr<const SystemMatrix<T> >; typedef int SystemMatrixType; /// this class holds a (distributed) stiffness matrix template <class T> class SystemMatrix : public escript::AbstractSystemMatrix { public: /// default constructor - throws exception. SystemMatrix(); SystemMatrix(SystemMatrixType type, SystemMatrixPattern_ptr pattern, dim_t rowBlockSize, dim_t columnBlockSize, bool patternIsUnrolled, const escript::FunctionSpace& rowFS, const escript::FunctionSpace& colFS); ~SystemMatrix(); /// Nullifies rows and columns in the matrix. 
/// The rows and columns are marked by positive values in mask_row and /// mask_col. Values on the main diagonal which are marked to set to /// zero by both mask_row and mask_col are set to main_diagonal_value. virtual void nullifyRowsAndCols(escript::Data& mask_row, escript::Data& mask_col, double main_diagonal_value); virtual inline void saveMM(const std::string& filename) const { if (mpi_info->size > 1) { //throw PasoException("SystemMatrix::saveMM: Only single rank supported."); SparseMatrix_ptr<T> merged(mergeSystemMatrix()); if (mpi_info->rank == 0) merged->saveMM(filename.c_str()); } else { mainBlock->saveMM(filename.c_str()); } } virtual inline void saveHB(const std::string& filename) const { if (mpi_info->size > 1) { throw PasoException("SystemMatrix::saveHB: Only single rank supported."); } else if (!(type & MATRIX_FORMAT_CSC)) { throw PasoException("SystemMatrix::saveHB: Only CSC format supported."); } else { mainBlock->saveHB_CSC(filename.c_str()); } } virtual void resetValues(bool preserveSolverData = false); /// Nullifies rows in the matrix. /// The rows are marked by positive values in mask_row. Values on the /// main diagonal which are marked to set to zero by mask_row are set /// to main_diagonal_value. void nullifyRows(double* mask_row, double main_diagonal_value); void add(dim_t, index_t*, dim_t, dim_t, index_t*, dim_t, double*); void makeZeroRowSums(double* left_over); /// copies the col_coupleBlock into row_coupleBlock. /// WARNING: this method uses mpi_requests of the coupler attached to the /// matrix. No reordering on the received columns is performed. /// In practice this means that components in /// row_coupleBlock->pattern->index and /// row_coupler->connector->recv->shared /// are ordered by increasing value. /// Note that send and receive row_coupler->connectors are swapping roles. 
void copyColCoupleBlock();

void copyRemoteCoupleBlock(bool recreatePattern);

/// NOTE(review): semantics not visible from this header — confirm in
/// the implementation (f1 appears to be a scaling factor).
void fillWithGlobalCoordinates(double f1);

/// Prints the matrix — presumably to stdout; confirm in implementation.
void print() const;

/// Merges the system matrix which is distributed on several MPI ranks
/// into a complete sparse matrix on rank 0. Used by the Merged Solver.
SparseMatrix_ptr<T> mergeSystemMatrix() const;

/// Merges the main block and the couple blocks into a single set of
/// (pointer, index, value) arrays returned via the output arguments.
/// The suffixed variants below handle the individual storage formats
/// (CSR with offset 0, blocked CSR, CSC with offset 1).
void mergeMainAndCouple(index_t** p_ptr, index_t** p_idx, double** p_val) const;

void mergeMainAndCouple_CSR_OFFSET0(index_t** p_ptr, index_t** p_idx, double** p_val) const;

void mergeMainAndCouple_CSR_OFFSET0_Block(index_t** p_ptr, index_t** p_idx, double** p_val) const;

void mergeMainAndCouple_CSC_OFFSET1(index_t** p_ptr, index_t** p_idx, double** p_val) const;

/// Copies only the main block into (pointer, index, value) arrays
/// (CSC, offset 1).
void copyMain_CSC_OFFSET1(index_t** p_ptr, index_t** p_idx, double** p_val);

void extendedRowsForST(dim_t* degree_ST, index_t* offset_ST, index_t* ST);

/// Applies the diagonal balancing vector to x in place; RHS selects
/// whether x is treated as a right-hand side or a solution vector
/// (see the balance_vector member documentation).
void applyBalanceInPlace(double* x, bool RHS) const;

/// Out-of-place variant of applyBalanceInPlace: writes the balanced
/// vector to x_out, leaving x untouched.
void applyBalance(double* x_out, const double* x, bool RHS) const;

/// Computes and applies the diagonal balancing D*A*D
/// (see the balance_vector member documentation).
void balance();

/// Returns the global size of the matrix; used as the numerator in
/// getSparsity(), i.e. the global number of stored entries.
double getGlobalSize() const;

/// Attaches a preconditioner chosen according to the given options.
void setPreconditioner(Options* options);

/// Applies the preconditioner.
/// This method needs to be called within a parallel region.
/// Barrier synchronization is performed before the evaluation to make /// sure that the input vector is available void solvePreconditioner(double* x, double* b); void freePreconditioner(); index_t* borrowMainDiagonalPointer() const; inline void startCollect(const double* in) const { startColCollect(in); } inline double* finishCollect() const { return finishColCollect(); } inline void startColCollect(const double* in) const { col_coupler->startCollect(in); } inline double* finishColCollect() const { return col_coupler->finishCollect(); } inline void startRowCollect(const double* in) { row_coupler->startCollect(in); } inline double* finishRowCollect() { return row_coupler->finishCollect(); } inline dim_t getNumRows() const { return mainBlock->numRows; } inline dim_t getNumCols() const { return mainBlock->numCols; } inline dim_t getTotalNumRows() const { return getNumRows() * row_block_size; } inline dim_t getTotalNumCols() const { return getNumCols() * col_block_size; } inline dim_t getRowOverlap() const { return row_coupler->getNumOverlapComponents(); } inline dim_t getColOverlap() const { return col_coupler->getNumOverlapComponents(); } inline dim_t getGlobalNumRows() const { if (type & MATRIX_FORMAT_CSC) { return pattern->input_distribution->getGlobalNumComponents(); } return pattern->output_distribution->getGlobalNumComponents(); } inline dim_t getGlobalNumCols() const { if (type & MATRIX_FORMAT_CSC) { return pattern->output_distribution->getGlobalNumComponents(); } return pattern->input_distribution->getGlobalNumComponents(); } inline dim_t getGlobalTotalNumRows() const { return getGlobalNumRows() * row_block_size; } inline dim_t getGlobalTotalNumCols() const { return getGlobalNumCols() * col_block_size; } inline double getSparsity() const { return getGlobalSize() / ((double)getGlobalTotalNumRows()*getGlobalTotalNumCols()); } inline dim_t getNumOutput() const { return pattern->getNumOutput(); } inline void copyBlockFromMainDiagonal(double* out) const { 
mainBlock->copyBlockFromMainDiagonal(out); } inline void copyBlockToMainDiagonal(const double* in) { mainBlock->copyBlockToMainDiagonal(in); } inline void copyFromMainDiagonal(double* out) const { mainBlock->copyFromMainDiagonal(out); } inline void copyToMainDiagonal(const double* in) { mainBlock->copyToMainDiagonal(in); } inline void setValues(double value) { mainBlock->setValues(value); col_coupleBlock->setValues(value); row_coupleBlock->setValues(value); is_balanced = false; } inline void rowSum(double* row_sum) const { if ((type & MATRIX_FORMAT_CSC) || (type & MATRIX_FORMAT_OFFSET1)) { throw PasoException("SystemMatrix::rowSum: No normalization " "available for compressed sparse column or index offset 1."); } else { const dim_t nrow = mainBlock->numRows*row_block_size; #pragma omp parallel for for (index_t irow=0; irow<nrow; ++irow) { row_sum[irow]=0.; } mainBlock->addRow_CSR_OFFSET0(row_sum); col_coupleBlock->addRow_CSR_OFFSET0(row_sum); } } void MatrixVector(double alpha, const T* in, double beta, T* out) const; void MatrixVector_CSR_OFFSET0(double alpha, const double* in, double beta, double* out) const; static SystemMatrix_ptr<double> loadMM_toCSR(const char* filename); static SystemMatrix_ptr<double> loadMM_toCSC(const char* filename); static int getSystemMatrixTypeId(int solver, int preconditioner, int package, bool is_complex, bool symmetry, const escript::JMPI& mpi_info); SystemMatrixType type; SystemMatrixPattern_ptr pattern; dim_t logical_row_block_size; dim_t logical_col_block_size; dim_t row_block_size; dim_t col_block_size; dim_t block_size; escript::Distribution_ptr row_distribution; escript::Distribution_ptr col_distribution; escript::JMPI mpi_info; Coupler_ptr<real_t> col_coupler; Coupler_ptr<real_t> row_coupler; /// main block SparseMatrix_ptr<T> mainBlock; /// coupling to neighbouring processors (row - col) SparseMatrix_ptr<T> col_coupleBlock; /// coupling to neighbouring processors (col - row) SparseMatrix_ptr<T> row_coupleBlock; /// coupling 
of rows-cols on neighbouring processors (may not be valid) SparseMatrix_ptr<T> remote_coupleBlock; bool is_balanced; /// matrix may be balanced by a diagonal matrix D=diagonal(balance_vector) /// if is_balanced is true, the matrix stored is D*A*D where A is the /// original matrix. /// When the system of linear equations is solved we solve D*A*D*y=c. /// So to solve A*x=b one needs to set c=D*b and x=D*y. double* balance_vector; /// stores the global ids for all cols in col_coupleBlock mutable index_t* global_id; /// package code controlling the solver pointer mutable index_t solver_package; /// pointer to data needed by a solver void* solver_p; private: virtual void setToSolution(escript::Data& out, escript::Data& in, boost::python::object& options) const; virtual void ypAx(escript::Data& y, escript::Data& x) const; void solve(T* out, T* in, Options* options) const; }; void RHS_loadMM_toCSR(const char* filename, double* b, dim_t size); } // namespace paso #include "Options.h" #include "Solver.h" #include <escript/Data.h> namespace paso { template <> SparseMatrix_ptr<double> PASO_DLL_API SystemMatrix<double>::mergeSystemMatrix() const; template <> SparseMatrix_ptr<cplx_t> PASO_DLL_API SystemMatrix<cplx_t>::mergeSystemMatrix() const; template <> void PASO_DLL_API SystemMatrix<double>::MatrixVector(double alpha, const double* in, double beta, double* out) const; template <> void PASO_DLL_API SystemMatrix<cplx_t>::MatrixVector(double alpha, const cplx_t* in, double beta, cplx_t* out) const; template <> void PASO_DLL_API SystemMatrix<double>::solve(double* out, double* in, Options* options) const; template <> void PASO_DLL_API SystemMatrix<cplx_t>::solve(cplx_t* out, cplx_t* in, Options* options) const; template <class T> SystemMatrix<T>::SystemMatrix() { throw PasoException("SystemMatrix: Illegal to generate default SystemMatrix."); } /// Allocates a SystemMatrix of given type using the given matrix pattern. /// Values are initialized with zero. 
/// If patternIsUnrolled and type & MATRIX_FORMAT_BLK1, it is assumed /// that the pattern is already unrolled to match the requested block size /// and offsets. Otherwise unrolling and offset adjustment will be performed. template <class T> SystemMatrix<T>::SystemMatrix(SystemMatrixType ntype, SystemMatrixPattern_ptr npattern, dim_t rowBlockSize, dim_t colBlockSize, bool patternIsUnrolled, const escript::FunctionSpace& rowFS, const escript::FunctionSpace& colFS) : escript::AbstractSystemMatrix(rowBlockSize, rowFS, colBlockSize, colFS), type(ntype), logical_row_block_size(rowBlockSize), logical_col_block_size(colBlockSize), is_balanced(false), balance_vector(NULL), global_id(NULL), solver_package(PASO_PASO), solver_p(NULL) { if (patternIsUnrolled) { if ((ntype & MATRIX_FORMAT_OFFSET1) != (npattern->type & MATRIX_FORMAT_OFFSET1)) { throw PasoException("SystemMatrix: requested offset and pattern offset do not match."); } } // do we need to apply unrolling? bool unroll // we don't like non-square blocks = (rowBlockSize != colBlockSize) #ifndef ESYS_HAVE_LAPACK // or any block size bigger than 3 || (colBlockSize > 3) #endif // or if block size one requested and the block size is not 1 || ((ntype & MATRIX_FORMAT_BLK1) && colBlockSize > 1) // or the offsets don't match || ((ntype & MATRIX_FORMAT_OFFSET1) != (npattern->type & MATRIX_FORMAT_OFFSET1)); SystemMatrixType pattern_format_out = (ntype & MATRIX_FORMAT_OFFSET1) ? 
MATRIX_FORMAT_OFFSET1 : MATRIX_FORMAT_DEFAULT; mpi_info = npattern->mpi_info; if (ntype & MATRIX_FORMAT_CSC) { if (unroll) { if (patternIsUnrolled) { pattern=npattern; } else { pattern = npattern->unrollBlocks(pattern_format_out, colBlockSize, rowBlockSize); } row_block_size = 1; col_block_size = 1; } else { pattern = npattern->unrollBlocks(pattern_format_out, 1, 1); row_block_size = rowBlockSize; col_block_size = colBlockSize; } row_distribution = pattern->input_distribution; col_distribution = pattern->output_distribution; } else { if (unroll) { if (patternIsUnrolled) { pattern = npattern; } else { pattern = npattern->unrollBlocks(pattern_format_out, rowBlockSize, colBlockSize); } row_block_size = 1; col_block_size = 1; } else { pattern = npattern->unrollBlocks(pattern_format_out, 1, 1); row_block_size = rowBlockSize; col_block_size = colBlockSize; } row_distribution = pattern->output_distribution; col_distribution = pattern->input_distribution; } if (ntype & MATRIX_FORMAT_DIAGONAL_BLOCK) { block_size = std::min(row_block_size, col_block_size); } else { block_size = row_block_size*col_block_size; } col_coupler.reset(new Coupler<real_t>(pattern->col_connector, col_block_size, mpi_info)); row_coupler.reset(new Coupler<real_t>(pattern->row_connector, row_block_size, mpi_info)); mainBlock.reset(new SparseMatrix<T>(type, pattern->mainPattern, row_block_size, col_block_size, true)); col_coupleBlock.reset(new SparseMatrix<T>(type, pattern->col_couplePattern, row_block_size, col_block_size, true)); row_coupleBlock.reset(new SparseMatrix<T>(type, pattern->row_couplePattern, row_block_size, col_block_size, true)); const dim_t n_norm = std::max(mainBlock->numCols*col_block_size, mainBlock->numRows*row_block_size); balance_vector = new double[n_norm]; #pragma omp parallel for for (dim_t i=0; i<n_norm; ++i) balance_vector[i] = 1.; } // deallocates a SystemMatrix template <class T> SystemMatrix<T>::~SystemMatrix() { solve_free(this); delete[] balance_vector; delete[] 
global_id; } template <class T> int SystemMatrix<T>::getSystemMatrixTypeId(int solver, int preconditioner, int package, bool is_complex, bool symmetry, const escript::JMPI& mpi_info) { int out = -1; int true_package = Options::getPackage(Options::mapEscriptOption(solver), Options::mapEscriptOption(package), symmetry, mpi_info); switch(true_package) { case PASO_PASO: out = MATRIX_FORMAT_DEFAULT; break; case PASO_MKL: out = MATRIX_FORMAT_BLK1 | MATRIX_FORMAT_OFFSET1; break; case PASO_UMFPACK: if (mpi_info->size > 1) { throw PasoException("The selected solver UMFPACK " "requires CSC format which is not supported with " "more than one rank."); } else { out = MATRIX_FORMAT_CSC | MATRIX_FORMAT_BLK1; } break; case PASO_MUMPS: out = MATRIX_FORMAT_BLK1 | MATRIX_FORMAT_OFFSET1; break; default: throw PasoException("unknown package code"); } if (out > 0 && is_complex) out |= MATRIX_FORMAT_COMPLEX; return out; } template <class T> void SystemMatrix<T>::nullifyRowsAndCols(escript::Data& row_q, escript::Data& col_q, double main_diagonal_value) { if (row_q.isComplex() || col_q.isComplex()) { throw PasoException("SystemMatrix::nullifyRowsAndCols: complex arguments not supported"); } if (col_q.getDataPointSize() != getColumnBlockSize()) { throw PasoException("nullifyRowsAndCols: column block size does not match the number of components of column mask."); } else if (row_q.getDataPointSize() != getRowBlockSize()) { throw PasoException("nullifyRowsAndCols: row block size does not match the number of components of row mask."); } else if (col_q.getFunctionSpace() != getColumnFunctionSpace()) { throw PasoException("nullifyRowsAndCols: column function space and function space of column mask don't match."); } else if (row_q.getFunctionSpace() != getRowFunctionSpace()) { throw PasoException("nullifyRowsAndCols: row function space and function space of row mask don't match."); } row_q.expand(); col_q.expand(); row_q.requireWrite(); col_q.requireWrite(); double* mask_row = 
row_q.getExpandedVectorReference(static_cast<escript::DataTypes::real_t>(0)).data(); double* mask_col = col_q.getExpandedVectorReference(static_cast<escript::DataTypes::real_t>(0)).data(); if (mpi_info->size > 1) { if (type & MATRIX_FORMAT_CSC) { throw PasoException("SystemMatrix::nullifyRowsAndCols: " "CSC is not supported with MPI."); } startColCollect(mask_col); startRowCollect(mask_row); if (col_block_size==1 && row_block_size==1) { mainBlock->nullifyRowsAndCols_CSR_BLK1(mask_row, mask_col, main_diagonal_value); double* remote_values = finishColCollect(); col_coupleBlock->nullifyRowsAndCols_CSR_BLK1(mask_row, remote_values, 0.); remote_values = finishRowCollect(); row_coupleBlock->nullifyRowsAndCols_CSR_BLK1(remote_values, mask_col, 0.); } else { mainBlock->nullifyRowsAndCols_CSR(mask_row, mask_col, main_diagonal_value); double* remote_values = finishColCollect(); col_coupleBlock->nullifyRowsAndCols_CSR(mask_row, remote_values, 0.); remote_values = finishRowCollect(); row_coupleBlock->nullifyRowsAndCols_CSR(remote_values, mask_col, 0.); } } else { if (col_block_size==1 && row_block_size==1) { if (type & MATRIX_FORMAT_CSC) { mainBlock->nullifyRowsAndCols_CSC_BLK1(mask_row, mask_col, main_diagonal_value); } else { mainBlock->nullifyRowsAndCols_CSR_BLK1(mask_row, mask_col, main_diagonal_value); } } else { if (type & MATRIX_FORMAT_CSC) { mainBlock->nullifyRowsAndCols_CSC(mask_row, mask_col, main_diagonal_value); } else { mainBlock->nullifyRowsAndCols_CSR(mask_row, mask_col, main_diagonal_value); } } } } template <class T> void SystemMatrix<T>::resetValues(bool preserveSolverData) { setValues(0.); if (!preserveSolverData) solve_free(this); } template <class T> void SystemMatrix<T>::setToSolution(escript::Data& out, escript::Data& in, boost::python::object& options) const { #if !defined(ESYS_HAVE_MUMPS) if (in.isComplex() || out.isComplex()) { throw PasoException("SystemMatrix::setToSolution: complex arguments not supported."); } #endif 
options.attr("resetDiagnostics")(); Options paso_options(options); if (out.getDataPointSize() != getColumnBlockSize()) { throw PasoException("solve: column block size does not match the number of components of solution."); } else if (in.getDataPointSize() != getRowBlockSize()) { throw PasoException("solve: row block size does not match the number of components of right hand side."); } else if (out.getFunctionSpace() != getColumnFunctionSpace()) { throw PasoException("solve: column function space and function space of solution don't match."); } else if (in.getFunctionSpace() != getRowFunctionSpace()) { throw PasoException("solve: row function space and function space of right hand side don't match."); } out.expand(); in.expand(); out.requireWrite(); in.requireWrite(); T* out_dp = out.getExpandedVectorReference(static_cast<T>(0)).data(); T* in_dp = in.getExpandedVectorReference(static_cast<T>(0)).data(); solve(out_dp, in_dp, &paso_options); paso_options.updateEscriptDiagnostics(options); } template <class T> void SystemMatrix<T>::ypAx(escript::Data& y, escript::Data& x) const { #if !defined(ESYS_HAVE_MUMPS) if (x.isComplex() || y.isComplex()) { throw PasoException("SystemMatrix::ypAx: complex arguments not supported."); } #endif if (x.getDataPointSize() != getColumnBlockSize()) { throw PasoException("matrix vector product: column block size does not match the number of components in input."); } else if (y.getDataPointSize() != getRowBlockSize()) { throw PasoException("matrix vector product: row block size does not match the number of components in output."); } else if (x.getFunctionSpace() != getColumnFunctionSpace()) { throw PasoException("matrix vector product: column function space and function space of input don't match."); } else if (y.getFunctionSpace() != getRowFunctionSpace()) { throw PasoException("matrix vector product: row function space and function space of output don't match."); } x.expand(); y.expand(); x.requireWrite(); y.requireWrite(); T* x_dp = 
x.getExpandedVectorReference(static_cast<T>(0)).data(); T* y_dp = y.getExpandedVectorReference(static_cast<T>(0)).data(); MatrixVector(1., x_dp, 1., y_dp); } } // namespace paso #endif // __PASO_SYSTEMMATRIX_H__
FeatureFinderAlgorithmPicked.h
// -------------------------------------------------------------------------- // OpenMS -- Open-Source Mass Spectrometry // -------------------------------------------------------------------------- // Copyright The OpenMS Team -- Eberhard Karls University Tuebingen, // ETH Zurich, and Freie Universitaet Berlin 2002-2013. // // This software is released under a three-clause BSD license: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of any author or any participating institution // may be used to endorse or promote products derived from this software // without specific prior written permission. // For a full list of authors, refer to the file AUTHORS. // -------------------------------------------------------------------------- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING // INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// // -------------------------------------------------------------------------- // $Maintainer: Oliver Kohlbacher, Stephan Aiche $ // $Authors: Marc Sturm $ // -------------------------------------------------------------------------- #ifndef OPENMS_TRANSFORMATIONS_FEATUREFINDER_FEATUREFINDERALGORITHMPICKED_H #define OPENMS_TRANSFORMATIONS_FEATUREFINDER_FEATUREFINDERALGORITHMPICKED_H #include <OpenMS/TRANSFORMATIONS/FEATUREFINDER/FeatureFinderAlgorithm.h> #include <OpenMS/TRANSFORMATIONS/FEATUREFINDER/FeatureFinderAlgorithmPickedHelperStructs.h> #include <OpenMS/TRANSFORMATIONS/FEATUREFINDER/TraceFitter.h> #include <OpenMS/TRANSFORMATIONS/FEATUREFINDER/EGHTraceFitter.h> #include <OpenMS/TRANSFORMATIONS/FEATUREFINDER/GaussTraceFitter.h> #include <OpenMS/FORMAT/MzMLFile.h> #include <OpenMS/FORMAT/FeatureXMLFile.h> #include <OpenMS/FORMAT/TextFile.h> #include <OpenMS/CHEMISTRY/IsotopeDistribution.h> #include <OpenMS/MATH/STATISTICS/StatisticFunctions.h> #include <OpenMS/MATH/MISC/MathFunctions.h> #include <OpenMS/CONCEPT/Constants.h> #include <OpenMS/CHEMISTRY/Element.h> #include <OpenMS/CHEMISTRY/ElementDB.h> #include <OpenMS/CHEMISTRY/IsotopeDistribution.h> #include <boost/math/special_functions/fpclassify.hpp> #include <numeric> #include <fstream> #include <algorithm> #include <QtCore/QDir> #ifdef _OPENMP #include <omp.h> #endif namespace OpenMS { /** @brief FeatureFinderAlgorithm for picked peaks. @htmlinclude OpenMS_FeatureFinderAlgorithmPicked.parameters @improvement RT model with tailing/fronting (Marc) @improvement More general MZ model - e.g. 
based on co-elution or with sulfur-averagines (Marc) @todo Fix output in parallel mode, change assignment of charges to threads, add parallel TOPP test (Marc) @todo Implement user-specified seed lists support (Marc) @ingroup FeatureFinder */ template <class PeakType, class FeatureType> class FeatureFinderAlgorithmPicked : public FeatureFinderAlgorithm<PeakType, FeatureType>, public FeatureFinderDefs { public: /// @name Type definitions //@{ typedef typename FeatureFinderAlgorithm<PeakType, FeatureType>::MapType MapType; typedef typename FeatureFinderAlgorithm<PeakType, FeatureType>::FeatureMapType FeatureMapType; typedef typename MapType::SpectrumType SpectrumType; typedef typename SpectrumType::FloatDataArrays FloatDataArrays; //@} using FeatureFinderAlgorithm<PeakType, FeatureType>::param_; using FeatureFinderAlgorithm<PeakType, FeatureType>::features_; using FeatureFinderAlgorithm<PeakType, FeatureType>::ff_; using FeatureFinderAlgorithm<PeakType, FeatureType>::defaults_; protected: typedef FeatureFinderAlgorithmPickedHelperStructs::Seed Seed; typedef typename FeatureFinderAlgorithmPickedHelperStructs::MassTrace<PeakType> MassTrace; typedef typename FeatureFinderAlgorithmPickedHelperStructs::MassTraces<PeakType> MassTraces; typedef FeatureFinderAlgorithmPickedHelperStructs::TheoreticalIsotopePattern TheoreticalIsotopePattern; typedef FeatureFinderAlgorithmPickedHelperStructs::IsotopePattern IsotopePattern; public: /// default constructor FeatureFinderAlgorithmPicked() : FeatureFinderAlgorithm<PeakType, FeatureType>(), map_(), log_() { //debugging defaults_.setValue("debug", "false", "When debug mode is activated, several files with intermediate results are written to the folder 'debug' (do not use in parallel mode)."); defaults_.setValidStrings("debug", ListUtils::create<String>("true,false")); //intensity defaults_.setValue("intensity:bins", 10, "Number of bins per dimension (RT and m/z). 
The higher this value, the more local the intensity significance score is.\nThis parameter should be decreased, if the algorithm is used on small regions of a map."); defaults_.setMinInt("intensity:bins", 1); defaults_.setSectionDescription("intensity", "Settings for the calculation of a score indicating if a peak's intensity is significant in the local environment (between 0 and 1)"); //mass trace search parameters defaults_.setValue("mass_trace:mz_tolerance", 0.03, "Tolerated m/z deviation of peaks belonging to the same mass trace.\nIt should be larger than the m/z resolution of the instrument.\nThis value must be smaller than that 1/charge_high!"); defaults_.setMinFloat("mass_trace:mz_tolerance", 0.0); defaults_.setValue("mass_trace:min_spectra", 10, "Number of spectra that have to show a similar peak mass in a mass trace."); defaults_.setMinInt("mass_trace:min_spectra", 1); defaults_.setValue("mass_trace:max_missing", 1, "Number of consecutive spectra where a high mass deviation or missing peak is acceptable.\nThis parameter should be well below 'min_spectra'!"); defaults_.setMinInt("mass_trace:max_missing", 0); defaults_.setValue("mass_trace:slope_bound", 0.1, "The maximum slope of mass trace intensities when extending from the highest peak.\nThis parameter is important to seperate overlapping elution peaks.\nIt should be increased if feature elution profiles fluctuate a lot."); defaults_.setMinFloat("mass_trace:slope_bound", 0.0); defaults_.setSectionDescription("mass_trace", "Settings for the calculation of a score indicating if a peak is part of a mass trace (between 0 and 1)."); //Isotopic pattern search parameters defaults_.setValue("isotopic_pattern:charge_low", 1, "Lowest charge to search for."); defaults_.setMinInt("isotopic_pattern:charge_low", 1); defaults_.setValue("isotopic_pattern:charge_high", 4, "Highest charge to search for."); defaults_.setMinInt("isotopic_pattern:charge_high", 1); defaults_.setValue("isotopic_pattern:mz_tolerance", 0.03, 
"Tolerated m/z deviation from the theoretical isotopic pattern.\nIt should be larger than the m/z resolution of the instrument.\nThis value must be smaller than that 1/charge_high!"); defaults_.setMinFloat("isotopic_pattern:mz_tolerance", 0.0); defaults_.setValue("isotopic_pattern:intensity_percentage", 10.0, "Isotopic peaks that contribute more than this percentage to the overall isotope pattern intensity must be present.", ListUtils::create<String>("advanced")); defaults_.setMinFloat("isotopic_pattern:intensity_percentage", 0.0); defaults_.setMaxFloat("isotopic_pattern:intensity_percentage", 100.0); defaults_.setValue("isotopic_pattern:intensity_percentage_optional", 0.1, "Isotopic peaks that contribute more than this percentage to the overall isotope pattern intensity can be missing.", ListUtils::create<String>("advanced")); defaults_.setMinFloat("isotopic_pattern:intensity_percentage_optional", 0.0); defaults_.setMaxFloat("isotopic_pattern:intensity_percentage_optional", 100.0); defaults_.setValue("isotopic_pattern:optional_fit_improvement", 2.0, "Minimal percental improvement of isotope fit to allow leaving out an optional peak.", ListUtils::create<String>("advanced")); defaults_.setMinFloat("isotopic_pattern:optional_fit_improvement", 0.0); defaults_.setMaxFloat("isotopic_pattern:optional_fit_improvement", 100.0); defaults_.setValue("isotopic_pattern:mass_window_width", 25.0, "Window width in Dalton for precalculation of estimated isotope distributions.", ListUtils::create<String>("advanced")); defaults_.setMinFloat("isotopic_pattern:mass_window_width", 1.0); defaults_.setMaxFloat("isotopic_pattern:mass_window_width", 200.0); defaults_.setValue("isotopic_pattern:abundance_12C", 98.93, "Rel. abundance of the light carbon. 
Modify if labeled.", ListUtils::create<String>("advanced")); defaults_.setMinFloat("isotopic_pattern:abundance_12C", 0.0); defaults_.setMaxFloat("isotopic_pattern:abundance_12C", 100.0); defaults_.setValue("isotopic_pattern:abundance_14N", 99.632, "Rel. abundance of the light nitrogen. Modify if labeled.", ListUtils::create<String>("advanced")); defaults_.setMinFloat("isotopic_pattern:abundance_14N", 0.0); defaults_.setMaxFloat("isotopic_pattern:abundance_14N", 100.0); defaults_.setSectionDescription("isotopic_pattern", "Settings for the calculation of a score indicating if a peak is part of a isotopic pattern (between 0 and 1)."); //Seed settings defaults_.setValue("seed:min_score", 0.8, "Minimum seed score a peak has to reach to be used as seed.\nThe seed score is the geometric mean of intensity score, mass trace score and isotope pattern score.\nIf your features show a large deviation from the averagene isotope distribution or from an gaussian elution profile, lower this score."); defaults_.setMinFloat("seed:min_score", 0.0); defaults_.setMaxFloat("seed:min_score", 1.0); defaults_.setSectionDescription("seed", "Settings that determine which peaks are considered a seed"); //Fitting settings defaults_.setValue("fit:max_iterations", 500, "Maximum number of iterations of the fit.", ListUtils::create<String>("advanced")); defaults_.setMinInt("fit:max_iterations", 1); defaults_.setSectionDescription("fit", "Settings for the model fitting"); //Feature settings defaults_.setValue("feature:min_score", 0.7, "Feature score threshold for a feature to be reported.\nThe feature score is the geometric mean of the average relative deviation and the correlation between the model and the observed peaks."); defaults_.setMinFloat("feature:min_score", 0.0); defaults_.setMaxFloat("feature:min_score", 1.0); defaults_.setValue("feature:min_isotope_fit", 0.8, "Minimum isotope fit of the feature before model fitting.", ListUtils::create<String>("advanced")); 
defaults_.setMinFloat("feature:min_isotope_fit", 0.0); defaults_.setMaxFloat("feature:min_isotope_fit", 1.0); defaults_.setValue("feature:min_trace_score", 0.5, "Trace score threshold.\nTraces below this threshold are removed after the model fitting.\nThis parameter is important for features that overlap in m/z dimension.", ListUtils::create<String>("advanced")); defaults_.setMinFloat("feature:min_trace_score", 0.0); defaults_.setMaxFloat("feature:min_trace_score", 1.0); defaults_.setValue("feature:min_rt_span", 0.333, "Minimum RT span in relation to extended area that has to remain after model fitting.", ListUtils::create<String>("advanced")); defaults_.setMinFloat("feature:min_rt_span", 0.0); defaults_.setMaxFloat("feature:min_rt_span", 1.0); defaults_.setValue("feature:max_rt_span", 2.5, "Maximum RT span in relation to extended area that the model is allowed to have.", ListUtils::create<String>("advanced")); defaults_.setMinFloat("feature:max_rt_span", 0.5); defaults_.setValue("feature:rt_shape", "symmetric", "Choose model used for RT profile fitting. 
If set to symmetric a gauss shape is used, in case of asymmetric an EGH shape is used.", ListUtils::create<String>("advanced")); defaults_.setValidStrings("feature:rt_shape", ListUtils::create<String>("symmetric,asymmetric")); defaults_.setValue("feature:max_intersection", 0.35, "Maximum allowed intersection of features.", ListUtils::create<String>("advanced")); defaults_.setMinFloat("feature:max_intersection", 0.0); defaults_.setMaxFloat("feature:max_intersection", 1.0); defaults_.setValue("feature:reported_mz", "monoisotopic", "The mass type that is reported for features.\n'maximum' returns the m/z value of the highest mass trace.\n'average' returns the intensity-weighted average m/z value of all contained peaks.\n'monoisotopic' returns the monoisotopic m/z value derived from the fitted isotope model."); defaults_.setValidStrings("feature:reported_mz", ListUtils::create<String>("maximum,average,monoisotopic")); defaults_.setSectionDescription("feature", "Settings for the features (intensity, quality assessment, ...)"); //user-specified seed settings defaults_.setValue("user-seed:rt_tolerance", 5.0, "Allowed RT deviation of seeds from the user-specified seed position."); defaults_.setMinFloat("user-seed:rt_tolerance", 0.0); defaults_.setValue("user-seed:mz_tolerance", 1.1, "Allowed m/z deviation of seeds from the user-specified seed position."); defaults_.setMinFloat("user-seed:mz_tolerance", 0.0); defaults_.setValue("user-seed:min_score", 0.5, "Overwrites 'seed:min_score' for user-specified seeds. 
The cutoff is typically a bit lower in this case.");
defaults_.setMinFloat("user-seed:min_score", 0.0);
defaults_.setMaxFloat("user-seed:min_score", 1.0);
defaults_.setSectionDescription("user-seed", "Settings for user-specified seeds.");
//debug settings
// NOTE(review): the description string below is truncated ("used when .") — complete it.
defaults_.setValue("debug:pseudo_rt_shift", 500.0, "Pseudo RT shift used when .", ListUtils::create<String>("advanced"));
defaults_.setMinFloat("debug:pseudo_rt_shift", 1.0);
this->defaultsToParam_();
}

// docu in base class
virtual void setSeeds(const FeatureMapType& seeds)
{
  seeds_ = seeds;
}

/**
  @brief Main method of the feature finder.

  Pipeline (see the numbered step comments below):
  1. precalculate intensity scores per peak,
  2. precalculate mass trace scores and local trace maxima,
  2.5 precalculate averagine isotope distributions per mass window,
  3. per charge: isotope pattern scores, seed selection, seed extension
     (OpenMP-parallel) and model fitting into features,
  4. resolve overlapping/contradicting features.
*/
virtual void run()
{
  //-------------------------------------------------------------------------
  //General initialization
  //---------------------------------------------------------------------------

  //quality estimation
  double min_feature_score = param_.getValue("feature:min_score");
  //charges to look at
  SignedSize charge_low = (Int)param_.getValue("isotopic_pattern:charge_low");
  SignedSize charge_high = (Int)param_.getValue("isotopic_pattern:charge_high");
  //fitting settings
  UInt max_iterations = param_.getValue("fit:max_iterations");

  Size max_isotopes = 20;

  // check if non-natural isotopic abundances are set. If so modify
  double abundance_12C = param_.getValue("isotopic_pattern:abundance_12C");
  double abundance_14N = param_.getValue("isotopic_pattern:abundance_14N");
  const Element* carbon_const = ElementDB::getInstance()->getElement("Carbon");
  Element* carbon = const_cast<Element*>(carbon_const);

  if (param_.getValue("isotopic_pattern:abundance_12C") != defaults_.getValue("isotopic_pattern:abundance_12C"))
  {
    // presumably raised so setMaxIsotope() does not clip the widened
    // distribution produced by labeled abundances — TODO confirm
    max_isotopes += 1000;
    IsotopeDistribution isotopes;
    std::vector<std::pair<Size, double> > container;
    container.push_back(std::make_pair(12, abundance_12C / 100.0));
    container.push_back(std::make_pair(13, 1.0 - (abundance_12C / 100.0)));
    isotopes.set(container);
    carbon->setIsotopeDistribution(isotopes);
  }
  const Element* nitrogen_const = ElementDB::getInstance()->getElement("Nitrogen");
  Element* nitrogen = const_cast<Element*>(nitrogen_const);
  if (param_.getValue("isotopic_pattern:abundance_14N") != defaults_.getValue("isotopic_pattern:abundance_14N"))
  {
    max_isotopes += 1000;
    IsotopeDistribution isotopes;
    std::vector<std::pair<Size, double> > container;
    container.push_back(std::make_pair(14, abundance_14N / 100.0));
    container.push_back(std::make_pair(15, 1.0 - (abundance_14N / 100.0)));
    isotopes.set(container);
    nitrogen->setIsotopeDistribution(isotopes);
  }

  // initialize trace fitter parameters here to avoid
  // bug https://sourceforge.net/apps/trac/open-ms/ticket/147
  Param trace_fitter_params;
  trace_fitter_params.setValue("max_iteration", max_iterations);

  //copy the input map
  map_ = *(FeatureFinderAlgorithm<PeakType, FeatureType>::map_);

  //flag for user-specified seed mode
  bool user_seeds = (seeds_.size() > 0);
  if (user_seeds)
  {
    seeds_.sortByMZ();
  }
  double user_rt_tol = param_.getValue("user-seed:rt_tolerance");
  double user_mz_tol = param_.getValue("user-seed:mz_tolerance");
  double user_seed_score = param_.getValue("user-seed:min_score");

  //reserve space for calculated scores
  UInt charge_count = charge_high - charge_low + 1;
  for (Size s = 0; s < map_.size(); ++s)
  {
    Size scan_size = map_[s].size();
    // layout of the float data arrays per spectrum:
    // [0] trace_score, [1] intensity_score, [2] local_max flag,
    // [3 .. 3+charge_count-1] per-charge pattern scores,
    // [3+charge_count .. 3+2*charge_count-1] per-charge overall scores
    map_[s].getFloatDataArrays().resize(3 + 2 * charge_count);
    map_[s].getFloatDataArrays()[0].setName("trace_score");
    map_[s].getFloatDataArrays()[0].assign(scan_size, 0.0);
    map_[s].getFloatDataArrays()[1].setName("intensity_score");
    map_[s].getFloatDataArrays()[1].assign(scan_size, 0.0);
    map_[s].getFloatDataArrays()[2].setName("local_max");
    map_[s].getFloatDataArrays()[2].assign(scan_size, 0.0);
    //create isotope pattern score arrays
    UInt charge = charge_low;
    for (Size i = 3; i < 3 + charge_count; ++i)
    {
      map_[s].getFloatDataArrays()[i].setName(String("pattern_score_") + charge);
      map_[s].getFloatDataArrays()[i].assign(scan_size, 0.0);
      ++charge;
    }
    //create overall score arrays
    charge = charge_low;
    for (Size i = 3 + charge_count; i < 3 + 2 * charge_count; ++i)
    {
      map_[s].getFloatDataArrays()[i].setName(String("overall_score_") + charge);
      map_[s].getFloatDataArrays()[i].assign(scan_size, 0.0);
      ++charge;
    }
  }

  int gl_progress = 0;
  debug_ = ((String)(param_.getValue("debug")) == "true");
  //clean up / create folders for debug information
  if (debug_)
  {
    QDir dir(".");
    dir.mkpath("debug/features");
    log_.open("debug/log.txt");
  }

  //---------------------------------------------------------------------------
  //Step 1:
  //Precalculate intensity scores for peaks
  //---------------------------------------------------------------------------
  if (debug_) log_ << "Precalculating intensity thresholds ..." << std::endl;
  //new scope to make local variables disappear
  {
    ff_->startProgress(0, intensity_bins_ * intensity_bins_, "Precalculating intensity scores");
    double rt_start = map_.getMinRT();
    double mz_start = map_.getMinMZ();
    intensity_rt_step_ = (map_.getMaxRT() - rt_start) / (double)intensity_bins_;
    intensity_mz_step_ = (map_.getMaxMZ() - mz_start) / (double)intensity_bins_;
    intensity_thresholds_.resize(intensity_bins_);
    for (Size rt = 0; rt < intensity_bins_; ++rt)
    {
      intensity_thresholds_[rt].resize(intensity_bins_);
      double min_rt = rt_start + rt * intensity_rt_step_;
      double max_rt = rt_start + (rt + 1) * intensity_rt_step_;
      std::vector<double> tmp;
      for (Size mz = 0; mz < intensity_bins_; ++mz)
      {
        ff_->setProgress(rt * intensity_bins_ + mz);
        double min_mz = mz_start + mz * intensity_mz_step_;
        double max_mz = mz_start + (mz + 1) * intensity_mz_step_;
        //std::cout << "rt range: " << min_rt << " - " << max_rt << std::endl;
        //std::cout << "mz range: " << min_mz << " - " << max_mz << std::endl;
        tmp.clear();
        for (typename MapType::ConstAreaIterator it = map_.areaBeginConst(min_rt, max_rt, min_mz, max_mz); it != map_.areaEndConst(); ++it)
        {
          tmp.push_back(it->getIntensity());
        }
        //init vector
        intensity_thresholds_[rt][mz].assign(21, 0.0);
        //store quantiles (20)
        if (!tmp.empty())
        {
          std::sort(tmp.begin(), tmp.end());
          for (Size i = 0; i < 21; ++i)
          {
            // 5%-quantile steps: i=0 -> minimum, i=20 -> maximum
            Size index = (Size) std::floor(0.05 * i * (tmp.size() - 1));
            intensity_thresholds_[rt][mz][i] = tmp[index];
          }
        }
      }
    }

    //store intensity score in PeakInfo
    for (Size s = 0; s < map_.size(); ++s)
    {
      for (Size p = 0; p < map_[s].size(); ++p)
      {
        map_[s].getFloatDataArrays()[1][p] = intensityScore_(s, p);
      }
    }
    ff_->endProgress();
  }

  //---------------------------------------------------------------------------
  //Step 2:
  //Precalculate mass trace scores and local trace maximum for each peak
  //---------------------------------------------------------------------------
  //new scope to make local variables disappear
  {
    Size end_iteration = map_.size() - std::min((Size) min_spectra_, map_.size());
    ff_->startProgress(min_spectra_, end_iteration, "Precalculating mass trace scores");
    // skip first and last scans since we cannot extend the mass traces there
    for (Size s = min_spectra_; s < end_iteration; ++s)
    {
      ff_->setProgress(s);
      const SpectrumType& spectrum = map_[s];
      //iterate over all peaks of the scan
      for (Size p = 0; p < spectrum.size(); ++p)
      {
        std::vector<double> scores;
        scores.reserve(2 * min_spectra_);

        double pos = spectrum[p].getMZ();
        float inte = spectrum[p].getIntensity();

        //if(debug_) log_ << std::endl << "Peak: " << pos << std::endl;
        bool is_max_peak = true; //checking the maximum intensity peaks -> use them later as feature seeds.
        // look at the following min_spectra_ scans ...
        for (Size i = 1; i <= min_spectra_; ++i)
        {
          if (!map_[s + i].empty())
          {
            Size spec_index = map_[s + i].findNearest(pos);
            double position_score = positionScore_(pos, map_[s + i][spec_index].getMZ(), trace_tolerance_);
            if (position_score > 0 && map_[s + i][spec_index].getIntensity() > inte) is_max_peak = false;
            scores.push_back(position_score);
          }
          else //no peaks in the spectrum
          {
            scores.push_back(0.0);
          }
        }
        // ... and the preceding min_spectra_ scans
        for (Size i = 1; i <= min_spectra_; ++i)
        {
          if (!map_[s - i].empty())
          {
            Size spec_index = map_[s - i].findNearest(pos);
            double position_score = positionScore_(pos, map_[s - i][spec_index].getMZ(), trace_tolerance_);
            if (position_score > 0 && map_[s - i][spec_index].getIntensity() > inte) is_max_peak = false;
            scores.push_back(position_score);
          }
          else //no peaks in the spectrum
          {
            scores.push_back(0.0);
          }
        }
        //Calculate a consensus score out of the scores calculated before
        double trace_score = std::accumulate(scores.begin(), scores.end(), 0.0) / scores.size();

        //store final score for later use
        map_[s].getFloatDataArrays()[0][p] = trace_score;
        map_[s].getFloatDataArrays()[2][p] = is_max_peak;
      }
    }
    ff_->endProgress();
  }

  //---------------------------------------------------------------------------
  //Step 2.5:
  //Precalculate isotope distributions for interesting mass ranges
  //---------------------------------------------------------------------------
  //new scope to make local variables disappear
  {
    double max_mass = map_.getMaxMZ() * charge_high;
    Size num_isotopes = std::ceil(max_mass / mass_window_width_) + 1;
    ff_->startProgress(0, num_isotopes, "Precalculating isotope distributions");

    //reserve enough space
    isotope_distributions_.resize(num_isotopes);

    //calculate distribution if necessary
    for (Size index = 0; index < num_isotopes; ++index)
    {
      //if(debug_) log_ << "Calculating iso dist for mass: " << 0.5*mass_window_width_ + index * mass_window_width_ << std::endl;
      IsotopeDistribution d;
      d.setMaxIsotope(max_isotopes);
      // distribution is estimated for the center mass of the window
      d.estimateFromPeptideWeight(0.5 * mass_window_width_ + index * mass_window_width_);
      //trim left and right. And store the number of isotopes on the left, to reconstruct the monoisotopic peak
      Size size_before = d.size();
      d.trimLeft(intensity_percentage_optional_);
      isotope_distributions_[index].trimmed_left = size_before - d.size();
      d.trimRight(intensity_percentage_optional_);

      for (IsotopeDistribution::Iterator it = d.begin(); it != d.end(); ++it)
      {
        isotope_distributions_[index].intensity.push_back(it->second);
        //if(debug_) log_ << " - " << it->second << std::endl;
      }

      //determine the number of optional peaks at the beginning/end
      Size begin = 0;
      Size end = 0;
      bool is_begin = true;
      bool is_end = false;
      for (Size i = 0; i < isotope_distributions_[index].intensity.size(); ++i)
      {
        if (isotope_distributions_[index].intensity[i] < intensity_percentage_)
        {
          if (!is_end && !is_begin) is_end = true;
          if (is_begin) ++begin;
          else if (is_end) ++end;
        }
        else if (is_begin)
        {
          is_begin = false;
        }
      }
      isotope_distributions_[index].optional_begin = begin;
      isotope_distributions_[index].optional_end = end;

      //scale the distribution to a maximum of 1
      double max = 0.0;
      for (Size i = 0; i < isotope_distributions_[index].intensity.size(); ++i)
      {
        if (isotope_distributions_[index].intensity[i] > max)
        {
          max = isotope_distributions_[index].intensity[i];
        }
      }
      isotope_distributions_[index].max = max;
      for (Size i = 0; i < isotope_distributions_[index].intensity.size(); ++i)
      {
        isotope_distributions_[index].intensity[i] /= max;
      }
      //if(debug_) log_ << " - optional begin/end:" << begin << " / " << end << std::endl;
    }
    ff_->endProgress();
  }

  //-------------------------------------------------------------------------
  //Step 3:
  //Charge loop (create seeds and features for each charge separately)
  //-------------------------------------------------------------------------
  Int plot_nr_global = -1; //counter for the number of plots (debug info)
  Int feature_nr_global = 0; //counter for the number of features (debug info)
  for (SignedSize c = charge_low; c <= charge_high; ++c)
  {
    UInt meta_index_isotope = 3 + c - charge_low;
    UInt meta_index_overall = 3 + charge_count + c - charge_low;

    Size feature_candidates = 0;
    std::vector<Seed> seeds;

    //-----------------------------------------------------------
    //Step 3.1: Precalculate IsotopePattern score
    //-----------------------------------------------------------
    ff_->startProgress(0, map_.size(), String("Calculating isotope pattern scores for charge ") + String(c));
    for (Size s = 0; s < map_.size(); ++s)
    {
      ff_->setProgress(s);
      const SpectrumType& spectrum = map_[s];
      for (Size p = 0; p < spectrum.size(); ++p)
      {
        double mz = spectrum[p].getMZ();

        //get isotope distribution for this mass
        const TheoreticalIsotopePattern& isotopes = getIsotopeDistribution_(mz * c);
        //determine highest peak in isotope distribution
        Size max_isotope = std::max_element(isotopes.intensity.begin(), isotopes.intensity.end()) - isotopes.intensity.begin();
        //Look up expected isotopic peaks (in the current spectrum or adjacent spectra)
        Size peak_index = spectrum.findNearest(mz - ((double)(isotopes.size() + 1) / c));
        IsotopePattern pattern(isotopes.size());
        for (Size i = 0; i < isotopes.size(); ++i)
        {
          double isotope_pos = mz + ((double)i - max_isotope) / c;
          findIsotope_(isotope_pos, s, pattern, i, peak_index);
        }

        double pattern_score = isotopeScore_(isotopes, pattern, true);

        //update pattern scores of all contained peaks (if necessary)
        if (pattern_score > 0.0)
        {
          for (Size i = 0; i < pattern.peak.size(); ++i)
          {
            if (pattern.peak[i] >= 0 && pattern_score > map_[pattern.spectrum[i]].getFloatDataArrays()[meta_index_isotope][pattern.peak[i]])
            {
              map_[pattern.spectrum[i]].getFloatDataArrays()[meta_index_isotope][pattern.peak[i]] = pattern_score;
            }
          }
        }
      }
    }
    ff_->endProgress();

    //-----------------------------------------------------------
    //Step 3.2:
    //Find seeds for this charge
    //-----------------------------------------------------------
    Size end_of_iteration = map_.size() - std::min((Size) min_spectra_, map_.size());
    ff_->startProgress(min_spectra_, end_of_iteration, String("Finding seeds for charge ") + String(c));

    double min_seed_score = param_.getValue("seed:min_score");
    //do nothing for the first few and last few spectra as the scans required to search for traces are missing
    for (Size s = min_spectra_; s < end_of_iteration; ++s)
    {
      ff_->setProgress(s);

      //iterate over peaks
      for (Size p = 0; p < map_[s].size(); ++p)
      {
        FloatDataArrays& meta = map_[s].getFloatDataArrays();
        // overall score = geometric mean of trace, intensity and pattern score
        double overall_score = std::pow(meta[0][p] * meta[1][p] * meta[meta_index_isotope][p], 1.0f / 3.0f);
        meta[meta_index_overall][p] = overall_score;

        //add seed to vector if certain conditions are fulfilled
        if (meta[2][p] != 0.0) // local maximum of mass trace is prerequisite for all features
        {
          //automatic seeds: overall score greater than the min seed score
          if (!user_seeds && overall_score >= min_seed_score)
          {
            Seed seed;
            seed.spectrum = s;
            seed.peak = p;
            seed.intensity = map_[s][p].getIntensity();
            seeds.push_back(seed);
          }
          //user-specified seeds: overall score greater than USER min seed score
          else if (user_seeds && overall_score >= user_seed_score)
          {
            //only consider seeds, if they are near a user-specified seed
            FeatureType tmp;
            tmp.setMZ(map_[s][p].getMZ() - user_mz_tol);
            for (typename FeatureMapType::const_iterator it = std::lower_bound(seeds_.begin(), seeds_.end(), tmp, typename FeatureType::MZLess()); it < seeds_.end(); ++it)
            {
              if (it->getMZ() > map_[s][p].getMZ() + user_mz_tol)
              {
                break;
              }
              if (fabs(it->getMZ() - map_[s][p].getMZ()) < user_mz_tol && fabs(it->getRT() - map_[s].getRT()) < user_rt_tol)
              {
                Seed seed;
                seed.spectrum = s;
                seed.peak = p;
                seed.intensity = map_[s][p].getIntensity();
                seeds.push_back(seed);
                break;
              }
            }
          }
        }
      }
    }
    //sort seeds according to intensity
    std::sort(seeds.rbegin(), seeds.rend());
    //create and store seeds map and selected peak map
    if (debug_)
    {
      //seeds
      FeatureMap<> seed_map;
      seed_map.reserve(seeds.size());
      for (Size i = 0; i < seeds.size(); ++i)
      {
        Size spectrum = seeds[i].spectrum;
        Size peak = seeds[i].peak;
        const FloatDataArrays& meta = map_[spectrum].getFloatDataArrays();
        Feature tmp;
        tmp.setIntensity(seeds[i].intensity);
        tmp.setOverallQuality(meta[meta_index_overall][peak]);
        tmp.setRT(map_[spectrum].getRT());
        tmp.setMZ(map_[spectrum][peak].getMZ());
        tmp.setMetaValue("intensity_score", meta[1][peak]);
        tmp.setMetaValue("pattern_score", meta[meta_index_isotope][peak]);
        tmp.setMetaValue("trace_score", meta[0][peak]);
        seed_map.push_back(tmp);
      }
      FeatureXMLFile().store(String("debug/seeds_") + String(c) + ".featureXML", seed_map);
    }
    ff_->endProgress();
    std::cout << "Found " << seeds.size() << " seeds for charge " << c << "." << std::endl;

    //------------------------------------------------------------------
    //Step 3.3:
    //Extension of seeds
    //------------------------------------------------------------------

    // We do not want to store features whose seeds lie within other
    // features with higher intensity. We thus store this information in
    // the map seeds_in_features which contains for each seed i a vector
    // of other seeds that are contained in the corresponding feature i.
    //
    // The features are stored in an temporary feature map until it is
    // decided whether they are contained within a seed of higher
    // intensity.
    std::map<Size, std::vector<Size> > seeds_in_features;
    // NOTE(review): this local typedef shadows the class-level FeatureMapType
    // (used above in the user-seed loop) within the same scope — rename to
    // avoid confusion.
    typedef std::map<Size, FeatureType> FeatureMapType;
    FeatureMapType tmp_feature_map;
    gl_progress = 0;
    ff_->startProgress(0, seeds.size(), String("Extending seeds for charge ") + String(c));

#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (SignedSize i = 0; i < (SignedSize)seeds.size(); ++i)
    {
      //------------------------------------------------------------------
      //Step 3.3.1:
      //Extend all mass traces
      //------------------------------------------------------------------
      const SpectrumType& spectrum = map_[seeds[i].spectrum];
      const PeakType& peak = spectrum[seeds[i].peak];
      IF_MASTERTHREAD
      {
        ff_->setProgress(gl_progress++);
        if (debug_)
        {
          log_ << std::endl << "Seed " << i << ":" << std::endl;
          //If the intensity is zero this seed is already uses in another feature
          log_ << " - Int: " << peak.getIntensity() << std::endl;
          log_ << " - RT: " << spectrum.getRT() << std::endl;
          log_ << " - MZ: " << peak.getMZ() << std::endl;
        }
      }

      //----------------------------------------------------------------
      //Find best fitting isotope pattern for this charge (using averagine)
      IsotopePattern best_pattern(0);
      double isotope_fit_quality = findBestIsotopeFit_(seeds[i], c, best_pattern);

      if (isotope_fit_quality < min_isotope_fit_)
      {
        abort_(seeds[i], "Could not find good enough isotope pattern containing the seed");
        //continue;
      }
      else
      {
        //extend the convex hull in RT dimension (starting from the trace peaks)
        MassTraces traces;
        traces.reserve(best_pattern.peak.size());
        extendMassTraces_(best_pattern, traces, meta_index_overall);

        //check if the traces are still valid
        double seed_mz = map_[seeds[i].spectrum][seeds[i].peak].getMZ();

        if (!traces.isValid(seed_mz, trace_tolerance_))
        {
          abort_(seeds[i], "Could not extend seed");
          //continue;
        }
        else
        {
          //------------------------------------------------------------------
          //Step 3.3.2:
          //Gauss/EGH fit (first fit to find the feature boundaries)
          //------------------------------------------------------------------
          Int plot_nr = -1;

#ifdef _OPENMP
#pragma omp critical (FeatureFinderAlgorithmPicked_PLOTNR)
#endif
          {
            plot_nr = ++plot_nr_global;
          }

          //------------------------------------------------------------------

          //TODO try fit with baseline term once more
          //baseline estimate
          traces.updateBaseline();
          traces.baseline = 0.75 * traces.baseline;

          traces[traces.max_trace].updateMaximum();

          // choose fitter
          double egh_tau = 0.0;
          TraceFitter<PeakType>* fitter = chooseTraceFitter_(egh_tau);

          fitter->setParameters(trace_fitter_params);
          fitter->fit(traces);

#if 0
          TraceFitter<PeakType>* alt_fitter = new GaussTraceFitter<PeakType>();
          Param alt_p;
          alt_p.setValue("max_iteration", max_iterations);

          alt_fitter->setParameters(alt_p);
          alt_fitter->fit(traces);

          LOG_DEBUG << "EGH: " << fitter->getCenter() << " " << fitter->getHeight() << std::endl;
          LOG_DEBUG << "GAUSS: " << alt_fitter->getCenter() << " " << alt_fitter->getHeight() << std::endl;
#endif
          // what should come out
          // left "sigma"
          // right "sigma"
          // x0 .. "center" position of RT fit
          // height .. "height" of RT fit

          //------------------------------------------------------------------

          //------------------------------------------------------------------
          //Step 3.3.3:
          //Crop feature according to RT fit (2.5*sigma) and remove badly fitting traces
          //------------------------------------------------------------------
          MassTraces new_traces;
          cropFeature_(fitter, traces, new_traces);

          //------------------------------------------------------------------
          //Step 3.3.4:
          //Check if feature is ok
          //------------------------------------------------------------------
          String error_msg = "";

          double fit_score = 0.0;
          double correlation = 0.0;
          double final_score = 0.0;

          bool feature_ok = checkFeatureQuality_(fitter, new_traces, seed_mz, min_feature_score, error_msg, fit_score, correlation, final_score);

#ifdef _OPENMP
#pragma omp critical (FeatureFinderAlgorithmPicked_DEBUG)
#endif
          {
            //write debug output of feature
            if (debug_)
            {
              writeFeatureDebugInfo_(fitter, traces, new_traces, feature_ok, error_msg, final_score, plot_nr, peak);
            }
          }
          traces = new_traces;

          //validity output
          if (!feature_ok)
          {
            abort_(seeds[i], error_msg);
            //continue;
          }
          else
          {
            //------------------------------------------------------------------
            //Step 3.3.5:
            //Feature creation
            //------------------------------------------------------------------
            Feature f;
            //set label
            f.setMetaValue(3, plot_nr);
            f.setCharge(c);
            f.setOverallQuality(final_score);
            f.setMetaValue("score_fit", fit_score);
            f.setMetaValue("score_correlation", correlation);
            f.setRT(fitter->getCenter());
            f.setWidth(fitter->getFWHM());

            // Extract some of the model parameters.
            if (egh_tau != 0.0)
            {
              egh_tau = (static_cast<EGHTraceFitter<PeakType>*>(fitter))->getTau();
              f.setMetaValue("EGH_tau", egh_tau);
              f.setMetaValue("EGH_height", (static_cast<EGHTraceFitter<PeakType>*>(fitter))->getHeight());
              f.setMetaValue("EGH_sigma", (static_cast<EGHTraceFitter<PeakType>*>(fitter))->getSigma());
            }

            // Calculate the mass of the feature: maximum, average, monoisotopic
            if (reported_mz_ == "maximum")
            {
              f.setMZ(traces[traces.getTheoreticalmaxPosition()].getAvgMZ());
            }
            else if (reported_mz_ == "average")
            {
              double total_intensity = 0.0;
              double average_mz = 0.0;
              for (Size t = 0; t < traces.size(); ++t)
              {
                for (Size p = 0; p < traces[t].peaks.size(); ++p)
                {
                  average_mz += traces[t].peaks[p].second->getMZ() * traces[t].peaks[p].second->getIntensity();
                  total_intensity += traces[t].peaks[p].second->getIntensity();
                }
              }
              average_mz /= total_intensity;
              f.setMZ(average_mz);
            }
            else if (reported_mz_ == "monoisotopic")
            {
              double mono_mz = traces[traces.getTheoreticalmaxPosition()].getAvgMZ();
              mono_mz -= (Constants::PROTON_MASS_U / c) * (traces.getTheoreticalmaxPosition() + best_pattern.theoretical_pattern.trimmed_left);
              f.setMZ(mono_mz);
            }

            // Calculate intensity based on model only
            // - the model does not include the baseline, so we ignore it here
            // - as we scaled the isotope distribution to
            f.setIntensity(fitter->getArea() / getIsotopeDistribution_(f.getMZ()).max);

            // we do not need the fitter anymore
            delete fitter;

            //add convex hulls of mass traces
            for (Size j = 0; j < traces.size(); ++j)
            {
              f.getConvexHulls().push_back(traces[j].getConvexhull());
            }

#ifdef _OPENMP
#pragma omp critical (FeatureFinderAlgorithmPicked_TMPFEATUREMAP)
#endif
            {
              tmp_feature_map[i] = f;
            }

            //----------------------------------------------------------------
            //Remember all seeds that lie inside the convex hull of the new feature
            DBoundingBox<2> bb = f.getConvexHull().getBoundingBox();
            for (Size j = i + 1; j < seeds.size(); ++j)
            {
              double rt = map_[seeds[j].spectrum].getRT();
              double mz = map_[seeds[j].spectrum][seeds[j].peak].getMZ();
              if (bb.encloses(rt, mz) && f.encloses(rt, mz))
              {
#ifdef _OPENMP
#pragma omp critical (FeatureFinderAlgorithmPicked_SEEDSINFEATURES)
#endif
                {
                  seeds_in_features[i].push_back(j);
                }
              }
            }
          }
        }
      } // three if/else statements instead of continue (disallowed in OpenMP)
    } // end of OPENMP over seeds

    // Here we have to evaluate which seeds are already contained in
    // features of seeds with higher intensities. Only if the seed is not
    // used in any feature with higher intensity, we can add it to the
    // features_ list.
    std::vector<Size> seeds_contained;
    for (typename std::map<Size, FeatureType>::iterator iter = tmp_feature_map.begin(); iter != tmp_feature_map.end(); ++iter)
    {
      Size seed_nr = iter->first;
      bool is_used = false;
      for (Size i = 0; i < seeds_contained.size(); ++i)
      {
        if (seed_nr == seeds_contained[i])
        {
          is_used = true;
          break;
        }
      }
      if (!is_used)
      {
        ++feature_candidates;
        //re-set label
        iter->second.setMetaValue(3, feature_nr_global);
        ++feature_nr_global;
        features_->push_back(iter->second);
        std::vector<Size> curr_seed = seeds_in_features[seed_nr];
        for (Size k = 0; k < curr_seed.size(); ++k)
        {
          seeds_contained.push_back(curr_seed[k]);
        }
      }
    }

    IF_MASTERTHREAD ff_->endProgress();
    std::cout << "Found " << feature_candidates << " feature candidates for charge " << c << "." << std::endl;
  } // END OPENMP

  //------------------------------------------------------------------
  //Step 4:
  //Resolve contradicting and overlapping features
  //------------------------------------------------------------------
  ff_->startProgress(0, features_->size() * features_->size(), "Resolving overlapping features");
  if (debug_) log_ << "Resolving intersecting features (" << features_->size() << " candidates)" << std::endl;
  //sort features according to m/z in order to speed up the resolution
  features_->sortByMZ();
  //precalculate BBs and maximum mz span
  std::vector<DBoundingBox<2> > bbs(features_->size());
  double max_mz_span = 0.0;
  for (Size i = 0; i < features_->size(); ++i)
  {
    bbs[i] = (*features_)[i].getConvexHull().getBoundingBox();
    if (bbs[i].height() > max_mz_span)
    {
      max_mz_span = bbs[i].height();
    }
  }
  Size removed(0);
  //intersect
  for (Size i = 0; i < features_->size(); ++i)
  {
    Feature& f1((*features_)[i]);
    for (Size j = i + 1; j < features_->size(); ++j)
    {
      ff_->setProgress(i * features_->size() + j);
      Feature& f2((*features_)[j]);
      //features that are more than 2 times the maximum m/z span apart do not overlap => abort
      if (f2.getMZ() - f1.getMZ() > 2.0 * max_mz_span) break;
      //do nothing if one of the features is already removed
      if (f1.getIntensity() == 0.0 || f2.getIntensity() == 0.0) continue;
      //do nothing if the overall convex hulls do not overlap
      if (!bbs[i].intersects(bbs[j])) continue;
      //act depending on the intersection
      double intersection = intersection_(f1, f2);
      if (intersection >= max_feature_intersection_)
      {
        ++removed;
        if (debug_) log_ << " - Intersection (" << (i + 1) << "/" << (j + 1) << "): " << intersection << std::endl;
        if (f1.getCharge() == f2.getCharge())
        {
          // same charge: the (intensity * quality) winner absorbs the loser
          if (f1.getIntensity() * f1.getOverallQuality() > f2.getIntensity() * f2.getOverallQuality())
          {
            if (debug_) log_ << " - same charge -> removing duplicate " << (j + 1) << std::endl;
            f1.getSubordinates().push_back(f2);
            f2.setIntensity(0.0);
          }
          else
          {
            if (debug_) log_ << " - same charge -> removing duplicate " << (i + 1) << std::endl;
            f2.getSubordinates().push_back(f1);
            f1.setIntensity(0.0);
          }
        }
        else if (f2.getCharge() % f1.getCharge() == 0)
        {
          if (debug_) log_ << " - different charge (one is the multiple of the other) -> removing lower charge " << (i + 1) << std::endl;
          f2.getSubordinates().push_back(f1);
          f1.setIntensity(0.0);
        }
        else if (f1.getCharge() % f2.getCharge() == 0)
        {
          if (debug_) log_ << " - different charge (one is the multiple of the other) -> removing lower charge " << (i + 1) << std::endl;
          f1.getSubordinates().push_back(f2);
          f2.setIntensity(0.0);
        }
        else
        {
          if (f1.getOverallQuality() > f2.getOverallQuality())
          {
            if (debug_) log_ << " - different charge -> removing lower score " << (j + 1) << std::endl;
            f1.getSubordinates().push_back(f2);
            f2.setIntensity(0.0);
          }
          else
          {
            if (debug_) log_ << " - different charge -> removing lower score " << (i + 1) << std::endl;
            f2.getSubordinates().push_back(f1);
            f1.setIntensity(0.0);
          }
        }
      }
    }
  }
  LOG_INFO << "Removed " << removed << " overlapping features." << std::endl;
  //finally remove features with intensity 0
  FeatureMap<> tmp;
  tmp.reserve(features_->size());
  for (Size i = 0; i < features_->size(); ++i)
  {
    if (features_->operator[](i).getIntensity() != 0.0)
    {
      tmp.push_back(features_->operator[](i));
    }
  }
  tmp.swapFeaturesOnly(*features_);
  //sort features by intensity
  features_->sortByIntensity(true);
  ff_->endProgress();
  std::cout << features_->size() << " features left." << std::endl;

  //Abort reasons
  std::cout << std::endl;
  std::cout << "Abort reasons during feature construction:" << std::endl;
  for (std::map<String, UInt>::const_iterator it = aborts_.begin(); it != aborts_.end(); ++it)
  {
    std::cout << "- " << it->first << ": " << it->second << std::endl;
  }
  if (debug_)
  {
    //store map of abort reasons for failed seeds
    FeatureMap<> abort_map;
    abort_map.reserve(abort_reasons_.size());
    Size counter = 0;
    for (typename std::map<Seed, String>::iterator it2 = abort_reasons_.begin(); it2 != abort_reasons_.end(); ++it2, ++counter)
    {
      Feature f;
      f.setRT(map_[it2->first.spectrum].getRT());
      f.setMZ(map_[it2->first.spectrum][it2->first.peak].getMZ());
      f.setIntensity(map_[it2->first.spectrum][it2->first.peak].getIntensity());
      f.setMetaValue("label", it2->second);
      f.setUniqueId(counter); // ID = index
      abort_map.push_back(f);
    }
    abort_map.setUniqueId();
    FeatureXMLFile().store("debug/abort_reasons.featureXML", abort_map);

    //store input map with calculated scores (without overall score)
    for (Size s = 0; s < map_.size(); ++s)
    {
      map_[s].getFloatDataArrays().erase(map_[s].getFloatDataArrays().begin() + 2);
    }
    MzMLFile().store("debug/input.mzML", map_);
  }
}

/// Factory method used by the FeatureFinder framework to instantiate this algorithm.
static FeatureFinderAlgorithm<PeakType, FeatureType>* create()
{
  return new FeatureFinderAlgorithmPicked();
}

/// Returns the registered name of this algorithm ("centroided").
static const String getProductName()
{
  return "centroided";
}

protected:
/// editable copy of the map
MapType map_;
/// Output stream for log/debug info
mutable std::ofstream log_;
/// debug flag
bool debug_;
/// Array of abort reasons
std::map<String, UInt> aborts_;
/// Array of abort reasons
std::map<Seed, String> abort_reasons_;
/// User-specified seed list
FeatureMapType seeds_;

/// @name Members for parameters often needed in methods
//@{
double pattern_tolerance_; ///< Stores mass_trace:mz_tolerance
double trace_tolerance_; ///< Stores isotopic_pattern:mz_tolerance
UInt min_spectra_; ///< Number of spectra that have to show the same mass (for finding a mass trace)
UInt
max_missing_trace_peaks_; ///< Stores mass_trace:max_missing
double slope_bound_; ///< Max slope of mass trace intensities
double intensity_percentage_; ///< Isotope pattern intensity contribution of required peaks
double intensity_percentage_optional_; ///< Isotope pattern intensity contribution of optional peaks
double optional_fit_improvement_; ///< Minimal improvement for leaving out optional isotope
double mass_window_width_; ///< Width of the isotope pattern mass bins
UInt intensity_bins_; ///< Number of bins (in RT and MZ) for intensity significance estimation
double min_isotope_fit_; ///< Minimum isotope pattern fit for a feature
double min_trace_score_; ///< Minimum quality of a traces
double min_rt_span_; ///< Minimum RT range that has to be left after the fit
double max_rt_span_; ///< Maximum RT range the model is allowed to span
double max_feature_intersection_; ///< Maximum allowed feature intersection (if larger, that one of the feature is removed)
String reported_mz_; ///< The mass type that is reported for features. 'maximum' returns the m/z value of the highest mass trace. 'average' returns the intensity-weighted average m/z value of all contained peaks. 'monoisotopic' returns the monoisotopic m/z value derived from the fitted isotope model.
//@}

/// @name Members for intensity significance estimation
//@{
/// RT bin width
double intensity_rt_step_;
/// m/z bin width
double intensity_mz_step_;
/// Precalculated intensity 20-quantiles (binned)
std::vector<std::vector<std::vector<double> > > intensity_thresholds_;
//@}

///Vector of precalculated isotope distributions for several mass windows
std::vector<TheoreticalIsotopePattern> isotope_distributions_;

// Docu in base class
// Caches frequently used parameter values in member variables; percentage
// parameters are converted from [0,100] to fractions in [0,1] here.
virtual void updateMembers_()
{
  pattern_tolerance_ = param_.getValue("mass_trace:mz_tolerance");
  trace_tolerance_ = param_.getValue("isotopic_pattern:mz_tolerance");
  // half the configured trace length: min_spectra_ scans are inspected on
  // each side of a peak
  min_spectra_ = (UInt) std::floor((double)param_.getValue("mass_trace:min_spectra") * 0.5);
  max_missing_trace_peaks_ = param_.getValue("mass_trace:max_missing");
  slope_bound_ = param_.getValue("mass_trace:slope_bound");
  intensity_percentage_ = (double)param_.getValue("isotopic_pattern:intensity_percentage") / 100.0;
  intensity_percentage_optional_ = (double)param_.getValue("isotopic_pattern:intensity_percentage_optional") / 100.0;
  optional_fit_improvement_ = (double)param_.getValue("isotopic_pattern:optional_fit_improvement") / 100.0;
  mass_window_width_ = param_.getValue("isotopic_pattern:mass_window_width");
  intensity_bins_ = param_.getValue("intensity:bins");
  min_isotope_fit_ = param_.getValue("feature:min_isotope_fit");
  min_trace_score_ = param_.getValue("feature:min_trace_score");
  min_rt_span_ = param_.getValue("feature:min_rt_span");
  max_rt_span_ = param_.getValue("feature:max_rt_span");
  max_feature_intersection_ = param_.getValue("feature:max_intersection");
  reported_mz_ = param_.getValue("feature:reported_mz");
}

/// Writes the abort reason to the log file and counts occurrences for each reason
// Note: the aborts_ counter is always updated; log and per-seed reason are
// recorded only in debug mode.
void abort_(const Seed& seed, const String& reason)
{
  if (debug_) log_ << "Abort: " << reason << std::endl;
  aborts_[reason]++;
  if (debug_) abort_reasons_[seed] = reason;
}

/**
 * Calculates the intersection between features.
* The value is normalized by the size of the smaller feature, so it ranges from 0 to 1.
    */
    double intersection_(const Feature& f1, const Feature& f2) const
    {
      //calculate the RT range sum of feature 1
      double s1 = 0.0;
      const std::vector<ConvexHull2D>& hulls1 = f1.getConvexHulls();
      for (Size i = 0; i < hulls1.size(); ++i)
      {
        s1 += hulls1[i].getBoundingBox().width();
      }

      //calculate the RT range sum of feature 2
      double s2 = 0.0;
      const std::vector<ConvexHull2D>& hulls2 = f2.getConvexHulls();
      for (Size j = 0; j < hulls2.size(); ++j)
      {
        s2 += hulls2[j].getBoundingBox().width();
      }

      //calculate overlap: pairwise comparison of the bounding boxes of the mass
      //traces of both features; only dimension [0] (RT) of the boxes is used
      double overlap = 0.0;
      for (Size i = 0; i < hulls1.size(); ++i)
      {
        DBoundingBox<2> bb1 = hulls1[i].getBoundingBox();
        for (Size j = 0; j < hulls2.size(); ++j)
        {
          DBoundingBox<2> bb2 = hulls2[j].getBoundingBox();
          if (bb1.intersects(bb2))
          {
            if (bb1.minPosition()[0] <= bb2.minPosition()[0] && bb1.maxPosition()[0] >= bb2.maxPosition()[0]) //bb1 contains bb2
            {
              overlap += bb2.width();
            }
            else if (bb2.minPosition()[0] <= bb1.minPosition()[0] && bb2.maxPosition()[0] >= bb1.maxPosition()[0]) //bb2 contains bb1
            {
              overlap += bb1.width();
            }
            else if (bb1.minPosition()[0] <= bb2.minPosition()[0] && bb1.maxPosition()[0] <= bb2.maxPosition()[0]) //the end of bb1 overlaps with bb2
            {
              overlap += bb1.maxPosition()[0] - bb2.minPosition()[0];
            }
            else if (bb2.minPosition()[0] <= bb1.minPosition()[0] && bb2.maxPosition()[0] <= bb1.maxPosition()[0]) //the end of bb2 overlaps with bb1
            {
              overlap += bb2.maxPosition()[0] - bb1.minPosition()[0];
            }
          }
        }
      }

      //normalize by the smaller of the two RT range sums
      return overlap / std::min(s1, s2);
    }

    /// Returns the isotope distribution for a certain mass window
    const TheoreticalIsotopePattern& getIsotopeDistribution_(double mass) const
    {
      //calculate index in the vector (one precalculated distribution per mass window)
      Size index = (Size) std::floor(mass / mass_window_width_);

      if (index >= isotope_distributions_.size())
      {
        throw Exception::InvalidValue(__FILE__, __LINE__, __PRETTY_FUNCTION__, "IsotopeDistribution not precalculated. Maximum allowed index is " + String(isotope_distributions_.size()), String(index));
      }

      //Return distribution
      return isotope_distributions_[index];
    }

    /**
      @brief Finds the best fitting position of the isotopic pattern estimate defined by @p center

      @param center the maximum peak of the isotope distribution (contains charge as well)
      @param charge The charge of the pattern
      @param best_pattern Returns the indices of the isotopic peaks. If a isotopic peak is missing -1 is returned.

      @return the score of the best fit (0.0 if no acceptable fit was found)
    */
    double findBestIsotopeFit_(const Seed& center, UInt charge, IsotopePattern& best_pattern) const
    {
      if (debug_) log_ << "Testing isotope patterns for charge " << charge << ": " << std::endl;
      const SpectrumType& spectrum = map_[center.spectrum];
      //mass of the seed peak scaled by charge selects the precalculated distribution
      const TheoreticalIsotopePattern& isotopes = getIsotopeDistribution_(spectrum[center.peak].getMZ() * charge);
      if (debug_) log_ << " - Seed: " << center.peak << " (mz:" << spectrum[center.peak].getMZ() << ")" << std::endl;

      //Find m/z boundaries of search space (linear search as this is local and we have the center already)
      double mass_window = (double)(isotopes.size() + 1) / (double)charge;
      if (debug_) log_ << " - Mass window: " << mass_window << std::endl;
      //search end of window (last peak within +mass_window of the seed)
      Size end = center.peak;
      while (end < spectrum.size() && spectrum[end].getMZ() < spectrum[center.peak].getMZ() + mass_window)
      {
        ++end;
      }
      --end;

      //search begin (first peak within -mass_window of the seed); signed type as it may run below 0
      SignedSize begin = center.peak;
      while (begin >= 0 && spectrum[begin].getMZ() > spectrum[center.peak].getMZ() - mass_window)
      {
        --begin;
      }
      ++begin;
      if (debug_) log_ << " - Begin: " << begin << " (mz:" << spectrum[begin].getMZ() << ")" << std::endl;
      if (debug_) log_ << " - End: " << end << " (mz:" << spectrum[end].getMZ() << ")" << std::endl;

      //fit isotope distribution to peaks: try each peak in the window as the pattern start
      double max_score = 0.0;
      for (Size start = begin; start <= end; ++start)
      {
        //find isotope peaks for the current start peak
        Size peak_index = start;
        IsotopePattern pattern(isotopes.size());
        if (debug_) log_ << " - Fitting at " << start << " (mz:" << spectrum[start].getMZ() << ")" << std::endl;
        for (Size iso = 0; iso < isotopes.size(); ++iso)
        {
          double pos = spectrum[start].getMZ() + iso / (double)charge;
          findIsotope_(pos, center.spectrum, pattern, iso, peak_index);
        }

        //check if the seed is contained, otherwise abort
        bool seed_contained = false;
        for (Size iso = 0; iso < pattern.peak.size(); ++iso)
        {
          if (pattern.peak[iso] == (Int)center.peak && pattern.spectrum[iso] == center.spectrum)
          {
            seed_contained = true;
            break;
          }
        }
        if (!seed_contained)
        {
          if (debug_) log_ << " - aborting: seed is not contained!" << std::endl;
          continue;
        }

        double score = isotopeScore_(isotopes, pattern, false);

        //check if the seed is still contained, otherwise abort
        //(isotopeScore_ may mark pattern peaks as removed)
        seed_contained = false;
        for (Size iso = 0; iso < pattern.peak.size(); ++iso)
        {
          if (pattern.peak[iso] == (Int)center.peak && pattern.spectrum[iso] == center.spectrum)
          {
            seed_contained = true;
            break;
          }
        }
        if (!seed_contained)
        {
          if (debug_) log_ << " - aborting: seed was removed during isotope fit!" << std::endl;
          continue;
        }

        if (debug_) log_ << " - final score: " << score << std::endl;
        //keep the best-scoring fit
        if (score > max_score)
        {
          max_score = score;
          best_pattern = pattern;
        }
      }
      if (debug_) log_ << " - best score : " << max_score << std::endl;
      best_pattern.theoretical_pattern = isotopes;
      return max_score;
    }

    /**
      Extends all mass traces of an isotope pattern in one step

      @param pattern The IsotopePattern that should be extended.
      @param traces The MassTraces datastructure where the extended mass traces will be stored in.
      @param meta_index_overall The index of the data array where the quality scores for the given charge are stored.
*/ void extendMassTraces_(const IsotopePattern& pattern, MassTraces& traces, Size meta_index_overall) const { //find index of the trace with the maximum intensity double max_int = 0.0; Size max_trace_index = 0; for (Size p = 0; p < pattern.peak.size(); ++p) { if (pattern.peak[p] < 0) continue; //skip missing and removed traces if (map_[pattern.spectrum[p]][pattern.peak[p]].getIntensity() > max_int) { max_int = map_[pattern.spectrum[p]][pattern.peak[p]].getIntensity(); max_trace_index = p; } } //extend the maximum intensity trace to determine the boundaries in RT dimension Size start_index = pattern.spectrum[max_trace_index]; const PeakType* start_peak = &(map_[pattern.spectrum[max_trace_index]][pattern.peak[max_trace_index]]); double start_mz = start_peak->getMZ(); double start_rt = map_[start_index].getRT(); if (debug_) log_ << " - Trace " << max_trace_index << " (maximum intensity)" << std::endl; if (debug_) log_ << " - extending from: " << map_[start_index].getRT() << " / " << start_mz << " (int: " << start_peak->getIntensity() << ")" << std::endl; //initialize the trace and extend MassTrace max_trace; max_trace.peaks.push_back(std::make_pair(start_rt, start_peak)); extendMassTrace_(max_trace, start_index, start_mz, false, meta_index_overall); extendMassTrace_(max_trace, start_index, start_mz, true, meta_index_overall); double rt_max = max_trace.peaks.back().first; double rt_min = max_trace.peaks.begin()->first; if (debug_) log_ << " - rt bounds: " << rt_min << "-" << rt_max << std::endl; //Abort if too few peak were found if (!max_trace.isValid() || max_trace.peaks.size() < 2 * min_spectra_ - max_missing_trace_peaks_) { if (debug_) log_ << " - could not extend trace with maximum intensity => abort" << std::endl; return; } for (Size p = 0; p < pattern.peak.size(); ++p) { if (debug_) log_ << " - Trace " << p << std::endl; if (p == max_trace_index) { if (debug_) log_ << " - previously extended maximum trace" << std::endl; traces.push_back(max_trace); 
traces.back().theoretical_int = pattern.theoretical_pattern.intensity[p]; traces.max_trace = traces.size() - 1; continue; } Seed starting_peak; starting_peak.spectrum = pattern.spectrum[p]; starting_peak.peak = pattern.peak[p]; if (pattern.peak[p] == -2) { if (debug_) log_ << " - removed during isotope fit" << std::endl; continue; } else if (pattern.peak[p] == -1) { if (debug_) log_ << " - missing" << std::endl; continue; } starting_peak.intensity = map_[starting_peak.spectrum][starting_peak.peak].getIntensity(); if (debug_) log_ << " - trace seed: " << map_[starting_peak.spectrum].getRT() << " / " << map_[starting_peak.spectrum][starting_peak.peak].getMZ() << " (int: " << map_[starting_peak.spectrum][starting_peak.peak].getIntensity() << ")" << std::endl; //search for nearby maximum of the mass trace as the extension assumes that it starts at the maximum Size begin = std::max((Size)0, starting_peak.spectrum - min_spectra_); Size end = std::min(starting_peak.spectrum + min_spectra_, (Size)map_.size()); double mz = map_[starting_peak.spectrum][starting_peak.peak].getMZ(); double inte = map_[starting_peak.spectrum][starting_peak.peak].getIntensity(); for (Size spectrum_index = begin; spectrum_index < end; ++spectrum_index) { //find better seeds (no-empty scan/low mz diff/higher intensity) SignedSize peak_index = -1; if (!map_[spectrum_index].empty()) { peak_index = map_[spectrum_index].findNearest(map_[starting_peak.spectrum][starting_peak.peak].getMZ()); } if (peak_index < 0 || map_[spectrum_index][peak_index].getIntensity() <= inte || std::fabs(mz - map_[spectrum_index][peak_index].getMZ()) >= pattern_tolerance_ ) { continue; } starting_peak.spectrum = spectrum_index; starting_peak.peak = peak_index; inte = map_[spectrum_index][peak_index].getIntensity(); } if (debug_) log_ << " - extending from: " << map_[starting_peak.spectrum].getRT() << " / " << map_[starting_peak.spectrum][starting_peak.peak].getMZ() << " (int: " << 
map_[starting_peak.spectrum][starting_peak.peak].getIntensity() << ")" << std::endl; //------------------------------------------------------------------ //Extend seed to a mass trace MassTrace trace; const PeakType* seed = &(map_[starting_peak.spectrum][starting_peak.peak]); //initialize trace with seed data and extend trace.peaks.push_back(std::make_pair(map_[starting_peak.spectrum].getRT(), seed)); extendMassTrace_(trace, starting_peak.spectrum, seed->getMZ(), false, meta_index_overall, rt_min, rt_max); extendMassTrace_(trace, starting_peak.spectrum, seed->getMZ(), true, meta_index_overall, rt_min, rt_max); //check if enough peaks were found if (!trace.isValid()) { if (debug_) log_ << " - could not extend trace " << std::endl; //Missing traces in the middle of a pattern are not acceptable => fix this if (p < traces.max_trace) { traces.clear(); //remove earlier traces continue; } else if (p > traces.max_trace) { break; //no more traces are possible } } traces.push_back(trace); traces.back().theoretical_int = pattern.theoretical_pattern.intensity[p]; } } /** @brief Extends a single mass trace in one RT direction How to use this method: - Add the starting peak to the @p trace - Indicate using @c increase_rt whether to extend in downstream or upstream direction @param trace The trace that should be extended @param spectrum_index The index of the spectrum from which on the mass trace should be extended @param mz The mz location (center) of the trace @param increase_rt Indicator whether the extension is done in forward or backward direction (with respect to the current spectrum) @param meta_index_overall The index of the overall score @param min_rt The rt minimum up to which the trace will be extended. @param max_rt The rt maximum up to which the trace will be extended. @note This method assumes that it extends from a local maximum. @note If @c min_rt or @c max_rt are set to 0.0 no boundary is assumed in the respective direction. 
*/ void extendMassTrace_(MassTrace& trace, SignedSize spectrum_index, double mz, bool increase_rt, Size meta_index_overall, double min_rt = 0.0, double max_rt = 0.0) const { //Reverse peaks if we run the method for the second time (to keep them in chronological order) if (increase_rt) { ++spectrum_index; std::reverse(trace.peaks.begin(), trace.peaks.end()); } else { --spectrum_index; } //check if boundaries are set bool boundaries = false; if (max_rt != min_rt) { boundaries = true; } //Relax slope threshold if there is a hard boundary for the extension double current_slope_bound = (1.0 + (double)boundaries) * slope_bound_; Size delta_count = min_spectra_; std::vector<double> deltas(delta_count - 1, 0); double last_observed_intensity = trace.peaks.back().second->getIntensity(); UInt missing_peaks = 0; Size peaks_before_extension = trace.peaks.size(); String abort_reason = ""; while ((!increase_rt && spectrum_index >= 0) || (increase_rt && spectrum_index < (SignedSize)map_.size())) { if (boundaries && ((!increase_rt && map_[spectrum_index].getRT() < min_rt) || (increase_rt && map_[spectrum_index].getRT() > max_rt)) ) { abort_reason = "Hit upper/lower boundary"; break; } SignedSize peak_index = -1; if (!map_[spectrum_index].empty()) { peak_index = map_[spectrum_index].findNearest(mz); } // check if the peak is "missing" if ( peak_index < 0 // no peak found || map_[spectrum_index].getFloatDataArrays()[meta_index_overall][peak_index] < 0.01 // overall score is to low || positionScore_(mz, map_[spectrum_index][peak_index].getMZ(), trace_tolerance_) == 0.0 // deviation of mz is too big ) { ++missing_peaks; if (missing_peaks > max_missing_trace_peaks_) { abort_reason = "too many peaks missing"; break; } } else { missing_peaks = 0; //add found peak to trace trace.peaks.push_back(std::make_pair(map_[spectrum_index].getRT(), &(map_[spectrum_index][peak_index]))); //update deltas and intensities deltas.push_back((map_[spectrum_index][peak_index].getIntensity() - 
last_observed_intensity) / last_observed_intensity); last_observed_intensity = map_[spectrum_index][peak_index].getIntensity(); //Abort if the average delta is too big (as intensity increases then) double average_delta = std::accumulate(deltas.end() - delta_count, deltas.end(), 0.0) / (double)delta_count; if (average_delta > current_slope_bound) { abort_reason = String("Average delta above threshold: ") + average_delta + "/" + current_slope_bound; //remove last peaks as we extended too far Size remove = std::min((Size)(trace.peaks.size() - peaks_before_extension), delta_count - 1); trace.peaks.erase(trace.peaks.end() - remove, trace.peaks.end()); break; } } //increase/decrease scan index if (increase_rt) ++spectrum_index; else --spectrum_index; } if (debug_) log_ << " - Added " << (trace.peaks.size() - peaks_before_extension) << " peaks (abort: " << abort_reason << ")" << std::endl; } /// Returns the index of the peak nearest to m/z @p pos in spectrum @p spec (linear search starting from index @p start) template <typename SpectrumType> Size nearest_(double pos, const SpectrumType& spec, Size start) const { Size index = start; double distance = std::fabs(pos - spec[index].getMZ()); ++index; while (index < spec.size()) { double new_distance = std::fabs(pos - spec[index].getMZ()); if (new_distance < distance) { distance = new_distance; ++index; } else { break; } } return --index; } /** @brief Searches for an isotopic peak in the current spectrum and the adjacent spectra @param pos m/z position of the searched for peak @param spectrum_index index of the central spectrum @param pattern IsotopePattern to store found peaks @param pattern_index index of the isotope in the pattern @param peak_index starting index of the search (to avoid multiple binary searches) */ void findIsotope_(double pos, Size spectrum_index, IsotopePattern& pattern, Size pattern_index, Size& peak_index) const { if (debug_) log_ << " - Isotope " << pattern_index << ": "; double intensity = 0.0; double 
pos_score = 0.0; UInt matches = 0; //search in the center spectrum const SpectrumType& spectrum = map_[spectrum_index]; peak_index = nearest_(pos, spectrum, peak_index); double this_mz_score = positionScore_(pos, spectrum[peak_index].getMZ(), pattern_tolerance_); pattern.theoretical_mz[pattern_index] = pos; if (this_mz_score != 0.0) { if (debug_) log_ << String::number(spectrum[peak_index].getIntensity(), 1) << " "; pattern.peak[pattern_index] = peak_index; pattern.spectrum[pattern_index] = spectrum_index; intensity += spectrum[peak_index].getIntensity(); pos_score += this_mz_score; ++matches; } //previous spectrum if (spectrum_index != 0 && !map_[spectrum_index - 1].empty()) { const SpectrumType& spectrum_before = map_[spectrum_index - 1]; Size index_before = spectrum_before.findNearest(pos); double mz_score = positionScore_(pos, spectrum_before[index_before].getMZ(), pattern_tolerance_); if (mz_score != 0.0) { if (debug_) log_ << String::number(spectrum_before[index_before].getIntensity(), 1) << "b "; intensity += spectrum_before[index_before].getIntensity(); pos_score += mz_score; ++matches; if (pattern.peak[pattern_index] == -1) { pattern.peak[pattern_index] = index_before; pattern.spectrum[pattern_index] = spectrum_index - 1; } } } //next spectrum if (spectrum_index != map_.size() - 1 && !map_[spectrum_index + 1].empty()) { const SpectrumType& spectrum_after = map_[spectrum_index + 1]; Size index_after = spectrum_after.findNearest(pos); double mz_score = positionScore_(pos, spectrum_after[index_after].getMZ(), pattern_tolerance_); if (mz_score != 0.0) { if (debug_) log_ << String::number(spectrum_after[index_after].getIntensity(), 1) << "a "; intensity += spectrum_after[index_after].getIntensity(); pos_score += mz_score; ++matches; if (pattern.peak[pattern_index] == -1) { pattern.peak[pattern_index] = index_after; pattern.spectrum[pattern_index] = spectrum_index + 1; } } } //no isotope found if (matches == 0) { if (debug_) log_ << " missing" << std::endl; 
//no spectrum contained a matching peak: mark the isotope position as missing
pattern.peak[pattern_index] = -1;
      pattern.mz_score[pattern_index] = 0.0;
      pattern.intensity[pattern_index] = 0.0;
    }
    else
    {
      if (debug_) log_ << "=> " << intensity / matches << std::endl;
      //average position score and intensity over all spectra that matched
      pattern.mz_score[pattern_index] = pos_score / matches;
      pattern.intensity[pattern_index] = intensity / matches;
    }
  }

  /// Calculates a score between 0 and 1 for the m/z deviation of two peaks.
  double positionScore_(double pos1, double pos2, double allowed_deviation) const
  {
    double diff = fabs(pos1 - pos2);
    if (diff <= 0.5 * allowed_deviation)
    {
      //deviation within half the tolerance: score between 0.9 and 1.0
      return 0.1 * (0.5 * allowed_deviation - diff) / (0.5 * allowed_deviation) + 0.9;
    }
    else if (diff <= allowed_deviation)
    {
      //deviation within the full tolerance: score falls linearly from 0.9 to 0.0
      return 0.9 * (allowed_deviation - diff) / (0.5 * allowed_deviation);
    }
    //deviation larger than the tolerance
    return 0.0;
  }

  /// Calculates a score between 0 and 1 for the correlation between theoretical and found isotope pattern
  double isotopeScore_(const TheoreticalIsotopePattern& isotopes, IsotopePattern& pattern, bool consider_mz_distances) const
  {
    if (debug_) log_ << " - fitting " << pattern.intensity.size() << " peaks" << std::endl;

    //Abort if a core peak is missing (only optional peaks at the borders may be absent)
    for (Size iso = 0 + isotopes.optional_begin; iso < pattern.peak.size() - isotopes.optional_end; ++iso)
    {
      if (pattern.peak[iso] == -1)
      {
        if (debug_) log_ << " - aborting: core peak is missing" << std::endl;
        return 0.0;
      }
    }

    //Find best isotope fit
    // - try to leave out optional isotope peaks to improve the fit
    // - do not allow gaps inside the pattern
    double best_int_score = 0.01; //Not 0 as this would result in problems when checking for the percental improvement

    //initial left boundary: skip leading optional peaks that are missing
    Size best_begin = 0;
    for (Size i = isotopes.optional_begin; i > 0; --i)
    {
      if (pattern.peak[i - 1] == -1)
      {
        best_begin = i;
        break;
      }
    }
    //initial right boundary: skip trailing optional peaks that are missing
    Size best_end = 0;
    for (Size i = isotopes.optional_end; i > 0; --i)
    {
      if (pattern.peak[pattern.peak.size() - i] == -1)
      {
        best_end = i;
        break;
      }
    }
    if (debug_) log_ << " - best_begin/end: " << best_begin << "/" << best_end << std::endl;

    //try all combinations of left/right boundaries within the optional ranges
    for (Size b = best_begin; b <= isotopes.optional_begin; ++b)
    {
      for (Size e =
best_end; e <= isotopes.optional_end; ++e) { //Make sure we have more than 2 peaks (unless in the first loop iteration, there we allow two points) if (isotopes.size() - b - e > 2 || (b == best_begin && e == best_end && isotopes.size() - b - e > 1)) { double int_score = Math::pearsonCorrelationCoefficient(isotopes.intensity.begin() + b, isotopes.intensity.end() - e, pattern.intensity.begin() + b, pattern.intensity.end() - e); if (boost::math::isnan(int_score)) int_score = 0.0; if (isotopes.size() - b - e == 2 && int_score > min_isotope_fit_) int_score = min_isotope_fit_; //special case for the first loop iteration (otherwise the score is 1) if (debug_) log_ << " - fit (" << b << "/" << e << "): " << int_score; if (int_score / best_int_score >= 1.0 + optional_fit_improvement_) { if (debug_) log_ << " - new best fit "; best_int_score = int_score; best_begin = b; best_end = e; } if (debug_) log_ << std::endl; } } } //if the best fit is empty, abort if (pattern.mz_score.size() - best_begin - best_end == 0) { return 0.0; } //remove left out peaks from the beginning for (Size i = 0; i < best_begin; ++i) { pattern.peak[i] = -2; pattern.intensity[i] = 0.0; pattern.mz_score[i] = 0.0; } //remove left out peaks from the end for (Size i = 0; i < best_end; ++i) { pattern.peak[isotopes.size() - 1 - i] = -2; pattern.intensity[isotopes.size() - 1 - i] = 0.0; pattern.mz_score[isotopes.size() - 1 - i] = 0.0; } //calculate m/z score (if required) if (consider_mz_distances) { best_int_score *= std::accumulate(pattern.mz_score.begin() + best_begin, pattern.mz_score.end() - best_end, 0.0) / (pattern.mz_score.size() - best_begin - best_end); } //return final score OPENMS_POSTCONDITION(best_int_score >= 0.0, (String("Internal error: Isotope score (") + best_int_score + ") should be >=0.0").c_str()) OPENMS_POSTCONDITION(best_int_score <= 1.0, (String("Internal error: Isotope score (") + best_int_score + ") should be <=1.0").c_str()) return best_int_score; } /** @brief Compute the intensity 
score for the peak @p peak in spectrum @p spectrum. The intensity score is computed by interpolating the score between the 4 nearest intensity bins. The scores from the different bins are weighted by the distance of the bin center to the peak. @param spectrum Index of the spectrum we are currently looking at @param peak Index of the peak that should be scored inside the spectrum @p spectrum */ double intensityScore_(Size spectrum, Size peak) const { // calculate (half) bin numbers double intensity = map_[spectrum][peak].getIntensity(); double rt = map_[spectrum].getRT(); double mz = map_[spectrum][peak].getMZ(); double rt_min = map_.getMinRT(); double mz_min = map_.getMinMZ(); UInt rt_bin = std::min(2 * intensity_bins_ - 1, (UInt) std::floor((rt - rt_min) / intensity_rt_step_ * 2.0)); UInt mz_bin = std::min(2 * intensity_bins_ - 1, (UInt) std::floor((mz - mz_min) / intensity_mz_step_ * 2.0)); // determine mz bins UInt ml, mh; if (mz_bin == 0 || mz_bin == 2 * intensity_bins_ - 1) { ml = mz_bin / 2; mh = mz_bin / 2; } else if (Math::isOdd(mz_bin)) { ml = mz_bin / 2; mh = mz_bin / 2 + 1; } else { ml = mz_bin / 2 - 1; mh = mz_bin / 2; } // determine rt bins UInt rl, rh; if (rt_bin == 0 || rt_bin == 2 * intensity_bins_ - 1) { rl = rt_bin / 2; rh = rt_bin / 2; } else if (Math::isOdd(rt_bin)) { rl = rt_bin / 2; rh = rt_bin / 2 + 1; } else { rl = rt_bin / 2 - 1; rh = rt_bin / 2; } // calculate distances to surrounding bin centers (normalized to [0,1]) double drl = std::fabs(rt_min + (0.5 + rl) * intensity_rt_step_ - rt) / intensity_rt_step_; double drh = std::fabs(rt_min + (0.5 + rh) * intensity_rt_step_ - rt) / intensity_rt_step_; double dml = std::fabs(mz_min + (0.5 + ml) * intensity_mz_step_ - mz) / intensity_mz_step_; double dmh = std::fabs(mz_min + (0.5 + mh) * intensity_mz_step_ - mz) / intensity_mz_step_; // Calculate weights for the intensity scores based on the distances to the // bin center(the nearer to better) double d1 = std::sqrt(std::pow(1.0 - drl, 2) + 
std::pow(1.0 - dml, 2)); double d2 = std::sqrt(std::pow(1.0 - drh, 2) + std::pow(1.0 - dml, 2)); double d3 = std::sqrt(std::pow(1.0 - drl, 2) + std::pow(1.0 - dmh, 2)); double d4 = std::sqrt(std::pow(1.0 - drh, 2) + std::pow(1.0 - dmh, 2)); double d_sum = d1 + d2 + d3 + d4; // Final score .. intensityScore in the surrounding bins, weighted by the distance of the // bin center to the peak double final = intensityScore_(rl, ml, intensity) * (d1 / d_sum) + intensityScore_(rh, ml, intensity) * (d2 / d_sum) + intensityScore_(rl, mh, intensity) * (d3 / d_sum) + intensityScore_(rh, mh, intensity) * (d4 / d_sum); OPENMS_POSTCONDITION(final >= 0.0, (String("Internal error: Intensity score (") + final + ") should be >=0.0").c_str()) OPENMS_POSTCONDITION(final <= 1.0001, (String("Internal error: Intensity score (") + final + ") should be <=1.0").c_str()) return final; } /** @brief Choose a the best trace fitter for the current mass traces based on the user parameter (symmetric, asymmetric) or based on an inspection of the mass trace (auto) @return A pointer to the trace fitter that should be used. 
*/ TraceFitter<PeakType>* chooseTraceFitter_(double& tau) { // choose fitter if (param_.getValue("feature:rt_shape") == "asymmetric") { LOG_DEBUG << "use asymmetric rt peak shape" << std::endl; tau = -1.0; return new EGHTraceFitter<PeakType>(); } else // if (param_.getValue("feature:rt_shape") == "symmetric") { LOG_DEBUG << "use symmetric rt peak shape" << std::endl; return new GaussTraceFitter<PeakType>(); } } double intensityScore_(Size rt_bin, Size mz_bin, double intensity) const { // interpolate score value according to quantiles(20) const std::vector<double>& quantiles20 = intensity_thresholds_[rt_bin][mz_bin]; // get iterator pointing to quantile that is >= intensity std::vector<double>::const_iterator it = std::lower_bound(quantiles20.begin(), quantiles20.end(), intensity); // bigger than the biggest value => return 1.0 if (it == quantiles20.end()) { return 1.0; } // interpolate inside the bin double bin_score = 0.0; if (it == quantiles20.begin()) { bin_score = 0.05 * intensity / *it; } else { // (intensity - vigintile_low) / (vigintile_high - vigintile_low) bin_score = 0.05 * (intensity - *(it - 1)) / (*it - *(it - 1)); } double final = bin_score + 0.05 * ((it - quantiles20.begin()) - 1.0); // determine position of lower bound in the vector //fix numerical problems if (final < 0.0) final = 0.0; if (final > 1.0) final = 1.0; // final = 1/20 * [ index(vigintile_low) + (intensity-vigintile_low) / (vigintile_high - vigintile_low) ] return final; } /** @name Handling of fitted mass traces Methods to handle the results of the mass trace fitting process. */ //@{ /** @brief Creates new mass traces @p new_traces based on the fitting result and the original traces @p traces. @param fitter The TraceFitter containing the results from the rt profile fitting step. @param traces Original mass traces found in the experiment. @param new_traces Mass traces created by cropping the original mass traces. 
*/ void cropFeature_(TraceFitter<PeakType>* fitter, const MassTraces& traces, MassTraces& new_traces) { double low_bound = fitter->getLowerRTBound(); double high_bound = fitter->getUpperRTBound(); if (debug_) log_ << " => RT bounds: " << low_bound << " - " << high_bound << std::endl; for (Size t = 0; t < traces.size(); ++t) { const MassTrace& trace = traces[t]; if (debug_) log_ << " - Trace " << t << ": (" << trace.theoretical_int << ")" << std::endl; MassTrace new_trace; //compute average relative deviation and correlation double deviation = 0.0; std::vector<double> v_theo, v_real; for (Size k = 0; k < trace.peaks.size(); ++k) { //consider peaks when inside RT bounds only if (trace.peaks[k].first >= low_bound && trace.peaks[k].first <= high_bound) { new_trace.peaks.push_back(trace.peaks[k]); double theo = traces.baseline + fitter->computeTheoretical(trace, k); v_theo.push_back(theo); double real = trace.peaks[k].second->getIntensity(); v_real.push_back(real); deviation += std::fabs(real - theo) / theo; } } double fit_score = 0.0; double correlation = 0.0; double final_score = 0.0; if (!new_trace.peaks.empty()) { fit_score = deviation / new_trace.peaks.size(); correlation = std::max(0.0, Math::pearsonCorrelationCoefficient(v_theo.begin(), v_theo.end(), v_real.begin(), v_real.end())); final_score = std::sqrt(correlation * std::max(0.0, 1.0 - fit_score)); } if (debug_) log_ << " - peaks: " << new_trace.peaks.size() << " / " << trace.peaks.size() << " - relative deviation: " << fit_score << " - correlation: " << correlation << " - final score: " << correlation << std::endl; //remove badly fitting traces if (!new_trace.isValid() || final_score < min_trace_score_) { if (t < traces.max_trace) { new_traces = MassTraces(); if (debug_) log_ << " - removed this and previous traces due to bad fit" << std::endl; new_traces.clear(); //remove earlier traces continue; } else if (t == traces.max_trace) { new_traces = MassTraces(); if (debug_) log_ << " - aborting (max trace was 
removed)" << std::endl; break; } else if (t > traces.max_trace) { if (debug_) log_ << " - removed due to bad fit => omitting the rest" << std::endl; break; //no more traces are possible } } //add new trace else { new_trace.theoretical_int = trace.theoretical_int; new_traces.push_back(new_trace); if (t == traces.max_trace) { new_traces.max_trace = new_traces.size() - 1; } } } new_traces.baseline = traces.baseline; } /** @brief Checks the feature based on different score thresholds and model constraints Feature can get invalid for following reasons: <ul> <li>Invalid fit: Fitted model is bigger than 'max_rt_span'</li> <li>Invalid feature after fit - too few traces or peaks left</li> <li>Invalid fit: Center outside of feature bounds</li> <li>Invalid fit: Less than 'min_rt_span' left after fit</li> <li>Feature quality too low after fit</li> </ul> @param fitter The TraceFitter containing the results from the rt profile fitting step. @param feature_traces Cropped feature mass traces. @param seed_mz Mz of the seed @param min_feature_score Minimal required feature score @param error_msg Will be filled with the error message, if the feature is invalid @param fit_score Will be filled with the fit score @param correlation Will be filled with correlation between feature and model @param final_score Will be filled with the final score @return true if the feature is valid */ bool checkFeatureQuality_(TraceFitter<PeakType>* fitter, MassTraces& feature_traces, const double& seed_mz, const double& min_feature_score, String& error_msg, double& fit_score, double& correlation, double& final_score) { bool feature_ok = true; //check if the sigma fit was ok (if it is larger than 'max_rt_span') if (feature_ok) { // 5.0 * sigma > max_rt_span_ * region_rt_span if (fitter->checkMaximalRTSpan(max_rt_span_)) { feature_ok = false; error_msg = "Invalid fit: Fitted model is bigger than 'max_rt_span'"; } } //check if the feature is valid if (!feature_traces.isValid(seed_mz, trace_tolerance_)) { 
feature_ok = false; error_msg = "Invalid feature after fit - too few traces or peaks left"; } //check if x0 is inside feature bounds if (feature_ok) { std::pair<double, double> rt_bounds = feature_traces.getRTBounds(); if (fitter->getCenter() < rt_bounds.first || fitter->getCenter() > rt_bounds.second) { feature_ok = false; error_msg = "Invalid fit: Center outside of feature bounds"; } } //check if the remaining traces fill out at least 'min_rt_span' of the RT span if (feature_ok) { std::pair<double, double> rt_bounds = feature_traces.getRTBounds(); if (fitter->checkMinimalRTSpan(rt_bounds, min_rt_span_)) { feature_ok = false; error_msg = "Invalid fit: Less than 'min_rt_span' left after fit"; } } //check if feature quality is high enough (average relative deviation and correlation of the whole feature) if (feature_ok) { std::vector<double> v_theo, v_real; double deviation = 0.0; for (Size t = 0; t < feature_traces.size(); ++t) { MassTrace& trace = feature_traces[t]; for (Size k = 0; k < trace.peaks.size(); ++k) { // was double theo = new_traces.baseline + trace.theoretical_int * height * exp(-0.5 * pow(trace.peaks[k].first - x0, 2) / pow(sigma, 2) ); double theo = feature_traces.baseline + fitter->computeTheoretical(trace, k); v_theo.push_back(theo); double real = trace.peaks[k].second->getIntensity(); v_real.push_back(real); deviation += std::fabs(real - theo) / theo; } } fit_score = std::max(0.0, 1.0 - (deviation / feature_traces.getPeakCount())); correlation = std::max(0.0, Math::pearsonCorrelationCoefficient(v_theo.begin(), v_theo.end(), v_real.begin(), v_real.end())); final_score = std::sqrt(correlation * fit_score); if (final_score < min_feature_score) { feature_ok = false; error_msg = "Feature quality too low after fit"; } //quality output if (debug_) { log_ << "Quality estimation:" << std::endl; log_ << " - relative deviation: " << fit_score << std::endl; log_ << " - correlation: " << correlation << std::endl; log_ << " => final score: " << final_score << 
std::endl; } } return feature_ok; } /** @brief Creates several files containing plots and viewable data of the fitted mass trace @param fitter The TraceFitter containing the results from the rt profile fitting step. @param traces Original mass traces found in the spectra @param new_traces Cropped feature mass traces @param feature_ok Status of the feature @param error_msg If the feature is invalid, @p error_msg contains the reason @param final_score Final score of the feature @param plot_nr Index of the feature @param peak The Seed Peak @param path The path where to put the debug files (default is debug/features) */ void writeFeatureDebugInfo_(TraceFitter<PeakType>* fitter, const MassTraces& traces, const MassTraces& new_traces, bool feature_ok, const String error_msg, const double final_score, const Int plot_nr, const PeakType& peak, const String path = "debug/features/") { double pseudo_rt_shift = param_.getValue("debug:pseudo_rt_shift"); TextFile tf; //gnuplot script String script = String("plot \"") + path + plot_nr + ".dta\" title 'before fit (RT: " + String::number(fitter->getCenter(), 2) + " m/z: " + String::number(peak.getMZ(), 4) + ")' with points 1"; //feature before fit for (Size k = 0; k < traces.size(); ++k) { for (Size j = 0; j < traces[k].peaks.size(); ++j) { tf.push_back(String(pseudo_rt_shift * k + traces[k].peaks[j].first) + "\t" + traces[k].peaks[j].second->getIntensity()); } } tf.store(path + plot_nr + ".dta"); //fitted feature if (new_traces.getPeakCount() != 0) { tf.clear(); for (Size k = 0; k < new_traces.size(); ++k) { for (Size j = 0; j < new_traces[k].peaks.size(); ++j) { tf.push_back(String(pseudo_rt_shift * k + new_traces[k].peaks[j].first) + "\t" + new_traces[k].peaks[j].second->getIntensity()); } } tf.store(path + plot_nr + "_cropped.dta"); script = script + ", \"" + path + plot_nr + "_cropped.dta\" title 'feature "; if (!feature_ok) { script = script + " - " + error_msg; } else { script = script + (features_->size() + 1) + " (score: " 
+ String::number(final_score, 3) + ")"; } script = script + "' with points 3"; } //fitted functions tf.clear(); for (Size k = 0; k < traces.size(); ++k) { char fun = 'f'; fun += (char)k; tf.push_back(fitter->getGnuplotFormula(traces[k], fun, traces.baseline, pseudo_rt_shift * k)); //tf.push_back(String(fun)+"(x)= " + traces.baseline + " + " + fitter->getGnuplotFormula(traces[k], pseudo_rt_shift * k)); script = script + ", " + fun + "(x) title 'Trace " + k + " (m/z: " + String::number(traces[k].getAvgMZ(), 4) + ")'"; } //output tf.push_back("set xlabel \"pseudo RT (mass traces side-by-side)\""); tf.push_back("set ylabel \"intensity\""); tf.push_back("set samples 1000"); tf.push_back(script); tf.push_back("pause -1"); tf.store(path + plot_nr + ".plot"); } //@} private: /// Not implemented FeatureFinderAlgorithmPicked& operator=(const FeatureFinderAlgorithmPicked&); /// Not implemented FeatureFinderAlgorithmPicked(const FeatureFinderAlgorithmPicked&); }; } // namespace OpenMS #endif // OPENMS_TRANSFORMATIONS_FEATUREFINDER_FEATUREFINDERALGORITHMPICKED_H
GB_unop__identity_fc32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fc32_fp32)
// op(A') function:  GB (_unop_tran__identity_fc32_fp32)

// C type:   GxB_FC32_t
// A type:   float
// cast:     GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    float

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: a real float becomes the real part of a complex value (imag = 0)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    float aij = Ax [pA] ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;       \
    Cx [pC] = z ;        \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fc32_fp32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is full or sparse: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fc32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template does the work, using the macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
rawBLAKE2_512_fmt_plug.c
/*
 * This file is part of John the Ripper password cracker,
 * Copyright (c) 2012 by Solar Designer
 * based on rawMD4_fmt.c code, with trivial changes by groszek.
 *
 * Re-used for BLAKE2 by Dhiru Kholia (dhiru at openwall.com)
 */

/* Standard JtR plugin stanza: the same file is included three times -
   once to declare the format, once to register it, once for the body. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawBLAKE2;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawBLAKE2);
#else

#include "arch.h"
#include "blake2.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include <string.h>
#ifdef _OPENMP
/* multiplier applied to the thread count to size the key batch */
#define OMP_SCALE               2048
#include <omp.h>
#endif
#include "memdbg.h"

#define FORMAT_LABEL            "Raw-Blake2"
#define FORMAT_NAME             ""

/* SIMD level is reported in the algorithm name only; the blake2 library
   selects the actual implementation at build time */
#if defined(__AVX__)
#define ALGORITHM_NAME          "AVX"
#elif defined(__XOP__)
#define ALGORITHM_NAME          "XOP"
#elif defined(__SSE4_1__)
#define ALGORITHM_NAME          "SSE4.1"
#elif defined(__SSSE3__)
#define ALGORITHM_NAME          "SSSE3"
#elif defined(__SSE2__)
#define ALGORITHM_NAME          "SSE2"
#else
#define ALGORITHM_NAME          "32/" ARCH_BITS_STR
#endif

#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        125
#define CIPHERTEXT_LENGTH       128 /* 64-byte digest as hex */
#define BINARY_SIZE             64  /* BLAKE2b-512 digest size */
#define SALT_SIZE               0   /* unsalted raw hash */
#define BINARY_ALIGN            4
#define SALT_ALIGN              1
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1

/* self-test vectors: hashes accepted with or without the "$BLAKE2$" tag */
static struct fmt_tests tests[] = {
	{"4245af08b46fbb290222ab8a68613621d92ce78577152d712467742417ebc1153668f1c9e1ec1e152a32a9c242dc686d175e087906377f0c483c5be2cb68953e", "blake2"},
	{"$BLAKE2$021ced8799296ceca557832ab941a50b4a11f83478cf141f51f933f653ab9fbcc05a037cddbed06e309bf334942c4e58cdf1a46e237911ccd7fcf9787cbc7fd0", "hello world"},
	/* hash generated by multiple versions (in C and Go) of b2sum program */
	{"$BLAKE2$1f7d9b7c9a90f7bfc66e52b69f3b6c3befbd6aee11aac860e99347a495526f30c9e51f6b0db01c24825092a09dd1a15740f0ade8def87e60c15da487571bcef7", "verystrongandlongpassword"},
	/* test vectors from Wikipedia */
	{"$BLAKE2$a8add4bdddfd93e4877d2746e62817b116364a1fa7bc148d95090bc7333b3673f82401cf7aa2e4cb1ecd90296e3f14cb5413f8ed77be73045b13914cdcd6a918", "The quick brown fox jumps over the lazy dog"},
	{"$BLAKE2$786a02f742015903c6c6fd852552d272912f4740e15847618a86e217f71f5419d25e1031afee585313896444934eb04b903a685b1448b755d56f701afe9be2ce", ""},
	{NULL}
};

/* per-candidate plaintext lengths, plaintexts, and computed digests;
   allocated in init() once the batch size is known */
static int (*saved_key_length);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)
    [(BINARY_SIZE + sizeof(ARCH_WORD_32) - 1) / sizeof(ARCH_WORD_32)];

/* Scale the key batch to the OpenMP thread count, then allocate the
   shared buffers.  Without OpenMP the batch size stays at 1. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt = omp_t * MIN_KEYS_PER_CRYPT;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt = omp_t * MAX_KEYS_PER_CRYPT;
#endif
	saved_key_length = mem_calloc_tiny(sizeof(*saved_key_length) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

/* Accept an optional "$BLAKE2$" tag followed by exactly 128 hex digits. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q;

	p = ciphertext;
	if (!strncmp(p, "$BLAKE2$", 8))
		p += 8;

	q = p;
	while (atoi16[ARCH_INDEX(*q)] != 0x7F) /* 0x7F = not a hex digit */
		q++;
	return !*q && q - p == CIPHERTEXT_LENGTH;
}

/* Canonicalize: ensure the "$BLAKE2$" tag is present and hex is lowercase.
   valid() guarantees the untagged input is exactly CIPHERTEXT_LENGTH chars. */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
	static char out[8 + CIPHERTEXT_LENGTH + 1];

	if (!strncmp(ciphertext, "$BLAKE2$", 8))
		return ciphertext;

	memcpy(out, "$BLAKE2$", 8);
	memcpy(out + 8, ciphertext, CIPHERTEXT_LENGTH + 1);
	strlwr(out + 8);
	return out;
}

/* Decode the hex digest (after the tag) into a static binary buffer. */
static void *get_binary(char *ciphertext)
{
	static unsigned char *out;
	char *p;
	int i;

	if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

	p = ciphertext + 8;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] =
		    (atoi16[ARCH_INDEX(*p)] << 4) |
		    atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

/* hash-table bucket selectors over the first digest word, at
   increasing table sizes (4 .. 27 bits) */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xF; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xFF; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xFFF; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xFFFF; }
/* remaining hash-table bucket selectors (20 .. 27 bits) */
static int get_hash_4(int index) { return crypt_out[index][0] & 0xFFFFF; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xFFFFFF; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7FFFFFF; }

/* Store a candidate plaintext, truncating to PLAINTEXT_LENGTH. */
static void set_key(char *key, int index)
{
	int len = strlen(key);
	saved_key_length[index] = len;
	if (len > PLAINTEXT_LENGTH)
		len = saved_key_length[index] = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
}

/* Return the stored plaintext, NUL-terminated at its recorded length. */
static char *get_key(int index)
{
	saved_key[index][saved_key_length[index]] = 0;
	return saved_key[index];
}

/* Hash all candidates in the batch.  Note: without _OPENMP the loop is
   compiled out, which is fine because init() leaves max_keys_per_crypt
   at 1 in that case, so only index 0 exists. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		/* unkeyed BLAKE2b with a 64-byte output */
		(void)blake2b((uint8_t *)crypt_out[index], saved_key[index], NULL, 64, saved_key_length[index], 0);
	}
	return count;
}

/* Does any computed digest match the target binary?  (Same single-index
   note as crypt_all for non-OpenMP builds.) */
static int cmp_all(void *binary, int count)
{
	int index = 0;

#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Full digest already compared in cmp_one; nothing further to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Format descriptor.  Field order is fixed by the fmt_main ABI -
   do not reorder entries. */
struct fmt_main fmt_rawBLAKE2 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		"BLAKE2b 512 " ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
opi.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> int main(int argc, char **argv) { double begin = omp_get_wtime(); //seed random number generator // Q2b: get the number of threads to run with from agrv and // add OpenMP API code to set number of threads here int Nthreads = atol(argv[1]); omp_set_num_threads(Nthreads); struct drand48_data *drandData; drandData = (struct drand48_data*) malloc(Nthreads*sizeof(struct drand48_data)); // Q2c: add an OpenMP parallel region here, wherein each thread initializes // one entry in drandData using srand48_r and seed based on thread number #pragma omp parallel { int rank = omp_get_thread_num(); int size = omp_get_num_threads(); long int seed = rank; srand48_r(seed, drandData+rank); } long long int Ntrials = 10000000; //need running tallies long long int Ntotal=0; long long int Ncircle=0; #pragma omp parallel for \ reduction(+:Ncircle) for (long long int n=0; n<Ntrials; n++) { double rand1; double rand2; int rank = omp_get_thread_num(); //gererate two random numbers (use the thread id to offset drandData) drand48_r(drandData+rank, &rand1); drand48_r(drandData+rank, &rand2); double x = -1 + 2*rand1; //shift to [-1,1] double y = -1 + 2*rand2; //check if its in the circle if (sqrt(x*x+y*y)<=1) Ncircle++; Ntotal++; if (n%100 ==0) { double pi = 4.0*Ncircle/ (double) (n); printf("Our estimate of pi is %g \n", pi); } } double pi = 4.0*Ncircle/ (double) (Ntotal); printf("Our final estimate of pi is %g \n", pi); printf("Time: \t %f \n", omp_get_wtime()-begin); free(drandData); return 0; }
tetrahedron_method.c
/* tetrahedron_method.c */ /* Copyright (C) 2014 Atsushi Togo */ #include "mathfunc.h" #include "kpoint.h" #ifdef THMWARNING #include <stdio.h> #define warning_print(...) fprintf(stderr,__VA_ARGS__) #else #define warning_print(...) #endif /* 6-------7 */ /* /| /| */ /* / | / | */ /* 4-------5 | */ /* | 2----|--3 */ /* | / | / */ /* |/ |/ */ /* 0-------1 */ /* */ /* i: vec neighbours */ /* 0: O 1, 2, 4 */ /* 1: a 0, 3, 5 */ /* 2: b 0, 3, 6 */ /* 3: a + b 1, 2, 7 */ /* 4: c 0, 5, 6 */ /* 5: c + a 1, 4, 7 */ /* 6: c + b 2, 4, 7 */ /* 7: c + a + b 3, 5, 6 */ static int main_diagonals[4][3] = {{ 1, 1, 1}, /* 0-7 */ {-1, 1, 1}, /* 1-6 */ { 1,-1, 1}, /* 2-5 */ { 1, 1,-1}}; /* 3-4 */ static int db_relative_grid_address[4][24][4][3] = { { { { 0, 0, 0}, { 1, 0, 0}, { 1, 1, 0}, { 1, 1, 1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 0, 1}, { 1, 1, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 1, 1, 0}, { 1, 1, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 0, 1, 1}, { 1, 1, 1}, }, { { 0, 0, 0}, { 0, 0, 1}, { 1, 0, 1}, { 1, 1, 1}, }, { { 0, 0, 0}, { 0, 0, 1}, { 0, 1, 1}, { 1, 1, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 0, 1, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, 0, 1}, { 0, 1, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 0, 1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 0, 0, 1}, { 1, 0, 1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 0, 0, 1}, {-1, -1, 0}, { 0, -1, 0}, }, { { 0, 0, 0}, { 0, 0, 1}, {-1, -1, 0}, {-1, 0, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 1, 0}, { 0, 0, -1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 1, 1, 0}, { 0, 0, -1}, }, { { 0, 0, 0}, { 0, 1, 0}, {-1, 0, -1}, { 0, 0, -1}, }, { { 0, 0, 0}, { 0, 1, 0}, {-1, 0, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, -1, -1}, { 0, 0, -1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, -1, -1}, { 0, -1, 0}, }, { { 0, 0, 0}, {-1, -1, -1}, { 0, -1, -1}, { 0, 0, -1}, }, { { 0, 0, 0}, {-1, -1, -1}, { 0, -1, -1}, { 0, -1, 0}, }, { { 0, 0, 0}, {-1, -1, -1}, {-1, 0, -1}, { 0, 0, -1}, }, { { 0, 0, 0}, {-1, -1, -1}, {-1, 0, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, -1, -1}, {-1, -1, 0}, { 0, -1, 0}, }, { { 
0, 0, 0}, {-1, -1, -1}, {-1, -1, 0}, {-1, 0, 0}, }, }, { { { 0, 0, 0}, { 1, 0, 0}, { 0, 1, 0}, { 0, 1, 1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, 0, 1}, { 0, 1, 1}, }, { { 0, 0, 0}, {-1, 1, 0}, {-1, 1, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, 0, 1}, {-1, 1, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, 1, 0}, { 0, 1, 0}, {-1, 1, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, {-1, 1, 1}, { 0, 1, 1}, }, { { 0, 0, 0}, {-1, 0, 1}, { 0, 0, 1}, {-1, 1, 1}, }, { { 0, 0, 0}, { 0, 0, 1}, {-1, 1, 1}, { 0, 1, 1}, }, { { 0, 0, 0}, { 0, 0, 1}, { 0, -1, 0}, { 1, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, 0, 1}, { 1, -1, 0}, }, { { 0, 0, 0}, {-1, 0, 1}, { 0, -1, 0}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, 0, 1}, { 0, 0, 1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 0, 1, 0}, { 0, 0, -1}, { 1, 0, -1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, 1, 0}, { 1, 0, -1}, }, { { 0, 0, 0}, {-1, 1, 0}, { 0, 0, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, 1, 0}, { 0, 1, 0}, { 0, 0, -1}, }, { { 0, 0, 0}, { 0, -1, -1}, { 1, -1, -1}, { 0, 0, -1}, }, { { 0, 0, 0}, { 0, -1, -1}, { 1, -1, -1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 1, -1, -1}, { 0, 0, -1}, { 1, 0, -1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, -1, -1}, { 1, 0, -1}, }, { { 0, 0, 0}, { 1, -1, -1}, { 0, -1, 0}, { 1, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, -1, -1}, { 1, -1, 0}, }, { { 0, 0, 0}, { 0, -1, -1}, { 0, 0, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, -1, -1}, { 0, -1, 0}, {-1, 0, 0}, }, }, { { { 0, 0, 0}, { 1, 0, 0}, { 0, 1, 0}, { 1, 0, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 0, 0, 1}, { 1, 0, 1}, }, { { 0, 0, 0}, {-1, 1, 0}, { 0, 0, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, 1, 0}, { 0, 1, 0}, { 0, 0, 1}, }, { { 0, 0, 0}, { 1, -1, 1}, { 0, -1, 0}, { 1, -1, 0}, }, { { 0, 0, 0}, { 0, -1, 1}, { 1, -1, 1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, -1, 1}, { 1, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, -1, 1}, { 1, 0, 1}, }, { { 0, 0, 0}, { 0, -1, 1}, { 1, -1, 1}, { 0, 0, 1}, }, { { 0, 0, 0}, { 1, -1, 1}, { 0, 0, 1}, { 1, 0, 1}, }, { { 0, 0, 0}, { 0, -1, 1}, { 0, -1, 0}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, -1, 1}, { 
0, 0, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, 0, -1}, { 0, 1, -1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, 1, 0}, { 0, 1, -1}, }, { { 0, 0, 0}, {-1, 0, -1}, { 0, 0, -1}, {-1, 1, -1}, }, { { 0, 0, 0}, {-1, 0, -1}, {-1, 1, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, 0, -1}, {-1, 1, -1}, { 0, 1, -1}, }, { { 0, 0, 0}, { 0, 1, 0}, {-1, 1, -1}, { 0, 1, -1}, }, { { 0, 0, 0}, {-1, 1, 0}, {-1, 1, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, 1, 0}, { 0, 1, 0}, {-1, 1, -1}, }, { { 0, 0, 0}, { 0, 0, -1}, { 0, -1, 0}, { 1, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, 0, -1}, { 1, -1, 0}, }, { { 0, 0, 0}, {-1, 0, -1}, { 0, 0, -1}, { 0, -1, 0}, }, { { 0, 0, 0}, {-1, 0, -1}, { 0, -1, 0}, {-1, 0, 0}, }, }, { { { 0, 0, 0}, { 1, 0, 0}, { 1, 1, 0}, { 0, 0, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 1, 1, 0}, { 0, 0, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, {-1, 0, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, 1, 0}, {-1, 0, 1}, { 0, 0, 1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, -1, 1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, -1, 1}, { 0, 0, 1}, }, { { 0, 0, 0}, {-1, -1, 1}, {-1, -1, 0}, { 0, -1, 0}, }, { { 0, 0, 0}, {-1, -1, 1}, {-1, -1, 0}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, -1, 1}, { 0, -1, 1}, { 0, -1, 0}, }, { { 0, 0, 0}, {-1, -1, 1}, {-1, 0, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, -1, 1}, { 0, -1, 1}, { 0, 0, 1}, }, { { 0, 0, 0}, {-1, -1, 1}, {-1, 0, 1}, { 0, 0, 1}, }, { { 0, 0, 0}, { 0, 0, -1}, { 1, 0, -1}, { 1, 1, -1}, }, { { 0, 0, 0}, { 0, 0, -1}, { 0, 1, -1}, { 1, 1, -1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 0, -1}, { 1, 1, -1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 0, 1, -1}, { 1, 1, -1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 1, 0}, { 1, 1, -1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 1, 1, 0}, { 1, 1, -1}, }, { { 0, 0, 0}, { 0, 0, -1}, { 0, 1, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, 1, 0}, { 0, 1, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, 0, -1}, { 1, 0, -1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 0, -1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 0, 0, -1}, {-1, -1, 0}, { 0, -1, 0}, }, { { 0, 0, 0}, { 0, 0, -1}, {-1, -1, 0}, {-1, 0, 0}, 
}, }, }; static void get_integration_weight_at_omegas(double *integration_weights, const int num_omegas, const double *omegas, SPGCONST double tetrahedra_omegas[24][4], double (*gn)(const int, const double, const double[4]), double (*IJ)(const int, const int, const double, const double[4])); static double get_integration_weight(const double omega, SPGCONST double tetrahedra_omegas[24][4], double (*gn)(const int, const double, const double[4]), double (*IJ)(const int, const int, const double, const double[4])); static int get_main_diagonal(SPGCONST double rec_lattice[3][3]); static int sort_omegas(double v[4]); static double _f(const int n, const int m, const double omega, const double vertices_omegas[4]); static double _J(const int i, const int ci, const double omega, const double vertices_omegas[4]); static double _I(const int i, const int ci, const double omega, const double vertices_omegas[4]); static double _n(const int i, const double omega, const double vertices_omegas[4]); static double _g(const int i, const double omega, const double vertices_omegas[4]); static double _n_0(void); static double _n_1(const double omega, const double vertices_omegas[4]); static double _n_2(const double omega, const double vertices_omegas[4]); static double _n_3(const double omega, const double vertices_omegas[4]); static double _n_4(void); static double _g_0(void); static double _g_1(const double omega, const double vertices_omegas[4]); static double _g_2(const double omega, const double vertices_omegas[4]); static double _g_3(const double omega, const double vertices_omegas[4]); static double _g_4(void); static double _J_0(void); static double _J_10(const double omega, const double vertices_omegas[4]); static double _J_11(const double omega, const double vertices_omegas[4]); static double _J_12(const double omega, const double vertices_omegas[4]); static double _J_13(const double omega, const double vertices_omegas[4]); static double _J_20(const double omega, const double 
vertices_omegas[4]); static double _J_21(const double omega, const double vertices_omegas[4]); static double _J_22(const double omega, const double vertices_omegas[4]); static double _J_23(const double omega, const double vertices_omegas[4]); static double _J_30(const double omega, const double vertices_omegas[4]); static double _J_31(const double omega, const double vertices_omegas[4]); static double _J_32(const double omega, const double vertices_omegas[4]); static double _J_33(const double omega, const double vertices_omegas[4]); static double _J_4(void); static double _I_0(void); static double _I_10(const double omega, const double vertices_omegas[4]); static double _I_11(const double omega, const double vertices_omegas[4]); static double _I_12(const double omega, const double vertices_omegas[4]); static double _I_13(const double omega, const double vertices_omegas[4]); static double _I_20(const double omega, const double vertices_omegas[4]); static double _I_21(const double omega, const double vertices_omegas[4]); static double _I_22(const double omega, const double vertices_omegas[4]); static double _I_23(const double omega, const double vertices_omegas[4]); static double _I_30(const double omega, const double vertices_omegas[4]); static double _I_31(const double omega, const double vertices_omegas[4]); static double _I_32(const double omega, const double vertices_omegas[4]); static double _I_33(const double omega, const double vertices_omegas[4]); static double _I_4(void); void thm_get_relative_grid_address(int relative_grid_address[24][4][3], SPGCONST double rec_lattice[3][3]) { int i, j, k, main_diag_index; main_diag_index = get_main_diagonal(rec_lattice); for (i = 0; i < 24; i++) { for (j = 0; j < 4; j++) { for (k = 0; k < 3; k++) { relative_grid_address[i][j][k] = db_relative_grid_address[main_diag_index][i][j][k]; } } } } void thm_get_all_relative_grid_address(int relative_grid_address[4][24][4][3]) { int i, j, k, main_diag_index; for (main_diag_index = 
0; main_diag_index < 4; main_diag_index++) { for (i = 0; i < 24; i++) { for (j = 0; j < 4; j++) { for (k = 0; k < 3; k++) { relative_grid_address[main_diag_index][i][j][k] = db_relative_grid_address[main_diag_index][i][j][k]; } } } } } double thm_get_integration_weight(const double omega, SPGCONST double tetrahedra_omegas[24][4], const char function) { if (function == 'I') { return get_integration_weight(omega, tetrahedra_omegas, _g, _I); } else { return get_integration_weight(omega, tetrahedra_omegas, _n, _J); } } void thm_get_integration_weight_at_omegas(double *integration_weights, const int num_omegas, const double *omegas, SPGCONST double tetrahedra_omegas[24][4], const char function) { if (function == 'I') { get_integration_weight_at_omegas(integration_weights, num_omegas, omegas, tetrahedra_omegas, _g, _I); } else { get_integration_weight_at_omegas(integration_weights, num_omegas, omegas, tetrahedra_omegas, _n, _J); } } void thm_get_neighboring_grid_points(int neighboring_grid_points[], const int grid_point, SPGCONST int relative_grid_address[][3], const int num_relative_grid_address, const int mesh[3], SPGCONST int bz_grid_address[][3], const int bz_map[]) { int bzmesh[3], address_double[3], bz_address_double[3]; int i, j, bz_gp; for (i = 0; i < 3; i++) { bzmesh[i] = mesh[i] * 2; } for (i = 0; i < num_relative_grid_address; i++) { for (j = 0; j < 3; j++) { address_double[j] = (bz_grid_address[grid_point][j] + relative_grid_address[i][j]) * 2; bz_address_double[j] = address_double[j]; } bz_gp = bz_map[kpt_get_grid_point_double_mesh(bz_address_double, bzmesh)]; if (bz_gp == -1) { neighboring_grid_points[i] = kpt_get_grid_point_double_mesh(address_double, mesh); } else { neighboring_grid_points[i] = bz_gp; } } } static void get_integration_weight_at_omegas(double *integration_weights, const int num_omegas, const double *omegas, SPGCONST double tetrahedra_omegas[24][4], double (*gn)(const int, const double, const double[4]), double (*IJ)(const int, const int, 
const double, const double[4])) { int i; #pragma omp parallel for for (i = 0; i < num_omegas; i++) { integration_weights[i] = get_integration_weight(omegas[i], tetrahedra_omegas, gn, IJ); } } static double get_integration_weight(const double omega, SPGCONST double tetrahedra_omegas[24][4], double (*gn)(const int, const double, const double[4]), double (*IJ)(const int, const int, const double, const double[4])) { int i, j, ci; double sum; double v[4]; sum = 0; for (i = 0; i < 24; i++) { for (j = 0; j < 4; j++) { v[j] = tetrahedra_omegas[i][j]; } ci = sort_omegas(v); if (omega < v[0]) { sum += IJ(0, ci, omega, v) * gn(0, omega, v); } else { if (v[0] < omega && omega < v[1]) { sum += IJ(1, ci, omega, v) * gn(1, omega, v); } else { if (v[1] < omega && omega < v[2]) { sum += IJ(2, ci, omega, v) * gn(2, omega, v); } else { if (v[2] < omega && omega < v[3]) { sum += IJ(3, ci, omega, v) * gn(3, omega, v); } else { if (v[3] < omega) { sum += IJ(4, ci, omega, v) * gn(4, omega, v); } } } } } } return sum / 6; } static int sort_omegas(double v[4]) { int i; double w[4]; i = 0; if (v[0] > v[1]) { w[0] = v[1]; w[1] = v[0]; i = 1; } else { w[0] = v[0]; w[1] = v[1]; } if (v[2] > v[3]) { w[2] = v[3]; w[3] = v[2]; } else { w[2] = v[2]; w[3] = v[3]; } if (w[0] > w[2]) { v[0] = w[2]; v[1] = w[0]; if (i == 0) { i = 4; } } else { v[0] = w[0]; v[1] = w[2]; } if (w[1] > w[3]) { v[3] = w[1]; v[2] = w[3]; if (i == 1) { i = 3; } } else { v[3] = w[3]; v[2] = w[1]; if (i == 1) { i = 5; } } if (v[1] > v[2]) { w[1] = v[1]; v[1] = v[2]; v[2] = w[1]; if (i == 4) { i = 2; } if (i == 5) { i = 1; } } else { if (i == 4) { i = 1; } if (i == 5) { i = 2; } } return i; } static int get_main_diagonal(SPGCONST double rec_lattice[3][3]) { int i, shortest; double length, min_length; double main_diag[3]; shortest = 0; mat_multiply_matrix_vector_di3(main_diag, rec_lattice, main_diagonals[0]); min_length = mat_norm_squared_d3(main_diag); for (i = 1; i < 4; i++) { mat_multiply_matrix_vector_di3(main_diag, 
rec_lattice, main_diagonals[i]); length = mat_norm_squared_d3(main_diag); if (min_length > length) { min_length = length; shortest = i; } } return shortest; } static double _f(const int n, const int m, const double omega, const double vertices_omegas[4]) { return ((omega - vertices_omegas[m]) / (vertices_omegas[n] - vertices_omegas[m])); } static double _J(const int i, const int ci, const double omega, const double vertices_omegas[4]) { switch (i) { case 0: return _J_0(); case 1: switch (ci) { case 0: return _J_10(omega, vertices_omegas); case 1: return _J_11(omega, vertices_omegas); case 2: return _J_12(omega, vertices_omegas); case 3: return _J_13(omega, vertices_omegas); } case 2: switch (ci) { case 0: return _J_20(omega, vertices_omegas); case 1: return _J_21(omega, vertices_omegas); case 2: return _J_22(omega, vertices_omegas); case 3: return _J_23(omega, vertices_omegas); } case 3: switch (ci) { case 0: return _J_30(omega, vertices_omegas); case 1: return _J_31(omega, vertices_omegas); case 2: return _J_32(omega, vertices_omegas); case 3: return _J_33(omega, vertices_omegas); } case 4: return _J_4(); } warning_print("******* Warning *******\n"); warning_print(" J is something wrong. 
\n"); warning_print("******* Warning *******\n"); warning_print("(line %d, %s).\n", __LINE__, __FILE__); return 0; } static double _I(const int i, const int ci, const double omega, const double vertices_omegas[4]) { switch (i) { case 0: return _I_0(); case 1: switch (ci) { case 0: return _I_10(omega, vertices_omegas); case 1: return _I_11(omega, vertices_omegas); case 2: return _I_12(omega, vertices_omegas); case 3: return _I_13(omega, vertices_omegas); } case 2: switch (ci) { case 0: return _I_20(omega, vertices_omegas); case 1: return _I_21(omega, vertices_omegas); case 2: return _I_22(omega, vertices_omegas); case 3: return _I_23(omega, vertices_omegas); } case 3: switch (ci) { case 0: return _I_30(omega, vertices_omegas); case 1: return _I_31(omega, vertices_omegas); case 2: return _I_32(omega, vertices_omegas); case 3: return _I_33(omega, vertices_omegas); } case 4: return _I_4(); } warning_print("******* Warning *******\n"); warning_print(" I is something wrong. \n"); warning_print("******* Warning *******\n"); warning_print("(line %d, %s).\n", __LINE__, __FILE__); return 0; } static double _n(const int i, const double omega, const double vertices_omegas[4]) { switch (i) { case 0: return _n_0(); case 1: return _n_1(omega, vertices_omegas); case 2: return _n_2(omega, vertices_omegas); case 3: return _n_3(omega, vertices_omegas); case 4: return _n_4(); } warning_print("******* Warning *******\n"); warning_print(" n is something wrong. \n"); warning_print("******* Warning *******\n"); warning_print("(line %d, %s).\n", __LINE__, __FILE__); return 0; } static double _g(const int i, const double omega, const double vertices_omegas[4]) { switch (i) { case 0: return _g_0(); case 1: return _g_1(omega, vertices_omegas); case 2: return _g_2(omega, vertices_omegas); case 3: return _g_3(omega, vertices_omegas); case 4: return _g_4(); } warning_print("******* Warning *******\n"); warning_print(" g is something wrong. 
\n"); warning_print("******* Warning *******\n"); warning_print("(line %d, %s).\n", __LINE__, __FILE__); return 0; } /* omega < omega1 */ static double _n_0(void) { return 0.0; } /* omega1 < omega < omega2 */ static double _n_1(const double omega, const double vertices_omegas[4]) { return (_f(1, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(3, 0, omega, vertices_omegas)); } /* omega2 < omega < omega3 */ static double _n_2(const double omega, const double vertices_omegas[4]) { return (_f(3, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) + _f(3, 0, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) + _f(3, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas)); } /* omega2 < omega < omega3 */ static double _n_3(const double omega, const double vertices_omegas[4]) { return (1.0 - _f(0, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas)); } /* omega4 < omega */ static double _n_4(void) { return 1.0; } /* omega < omega1 */ static double _g_0(void) { return 0.0; } /* omega1 < omega < omega2 */ static double _g_1(const double omega, const double vertices_omegas[4]) { return (3 * _f(1, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) / (vertices_omegas[3] - vertices_omegas[0])); } /* omega2 < omega < omega3 */ static double _g_2(const double omega, const double vertices_omegas[4]) { return (3 / (vertices_omegas[3] - vertices_omegas[0]) * (_f(1, 2, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) + _f(2, 1, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas))); } /* omega3 < omega < omega4 */ static double _g_3(const double omega, const double vertices_omegas[4]) { return (3 * _f(1, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas) / (vertices_omegas[3] - vertices_omegas[0])); } /* omega4 < omega */ static double _g_4(void) { return 0.0; } static double _J_0(void) { 
return 0.0; } static double _J_10(const double omega, const double vertices_omegas[4]) { return (1.0 + _f(0, 1, omega, vertices_omegas) + _f(0, 2, omega, vertices_omegas) + _f(0, 3, omega, vertices_omegas)) / 4; } static double _J_11(const double omega, const double vertices_omegas[4]) { return _f(1, 0, omega, vertices_omegas) / 4; } static double _J_12(const double omega, const double vertices_omegas[4]) { return _f(2, 0, omega, vertices_omegas) / 4; } static double _J_13(const double omega, const double vertices_omegas[4]) { return _f(3, 0, omega, vertices_omegas) / 4; } static double _J_20(const double omega, const double vertices_omegas[4]) { return (_f(3, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) + _f(3, 0, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * (1.0 + _f(0, 3, omega, vertices_omegas)) + _f(3, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas) * (1.0 + _f(0, 3, omega, vertices_omegas) + _f(0, 2, omega, vertices_omegas))) / 4 / _n_2(omega, vertices_omegas); } static double _J_21(const double omega, const double vertices_omegas[4]) { return (_f(3, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * (1.0 + _f(1, 3, omega, vertices_omegas) + _f(1, 2, omega, vertices_omegas)) + _f(3, 0, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * (_f(1, 3, omega, vertices_omegas) + _f(1, 2, omega, vertices_omegas)) + _f(3, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas)) / 4 / _n_2(omega, vertices_omegas); } static double _J_22(const double omega, const double vertices_omegas[4]) { return (_f(3, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) + _f(3, 0, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * _f(2, 1, omega, 
vertices_omegas) + _f(3, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas) * (_f(2, 1, omega, vertices_omegas) + _f(2, 0, omega, vertices_omegas))) / 4 / _n_2(omega, vertices_omegas); } static double _J_23(const double omega, const double vertices_omegas[4]) { return (_f(3, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * _f(3, 1, omega, vertices_omegas) + _f(3, 0, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * (_f(3, 1, omega, vertices_omegas) + _f(3, 0, omega, vertices_omegas)) + _f(3, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas) * _f(3, 0, omega, vertices_omegas)) / 4 / _n_2(omega, vertices_omegas); } static double _J_30(const double omega, const double vertices_omegas[4]) { return (1.0 - _f(0, 3, omega, vertices_omegas) * _f(0, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas)) / 4 / _n_3(omega, vertices_omegas); } static double _J_31(const double omega, const double vertices_omegas[4]) { return (1.0 - _f(0, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas)) / 4 / _n_3(omega, vertices_omegas); } static double _J_32(const double omega, const double vertices_omegas[4]) { return (1.0 + _f(0, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas)) / 4 / _n_3(omega, vertices_omegas); } static double _J_33(const double omega, const double vertices_omegas[4]) { return (1.0 - _f(0, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas) * (1.0 + _f(3, 0, omega, vertices_omegas) + _f(3, 1, omega, vertices_omegas) + _f(3, 2, omega, vertices_omegas))) / 4 / _n_3(omega, vertices_omegas); } static double _J_4(void) { return 0.25; } static double _I_0(void) { return 0.0; } 
/* Vertex-resolved delta-function (DOS) weights, energy region 1
 * (omega1 < omega < omega2).  Each is an average over the vertex of
 * interest, normalized by 3. */
static double _I_10(const double omega, const double vertices_omegas[4])
{
  double acc;

  acc = _f(0, 1, omega, vertices_omegas);
  acc += _f(0, 2, omega, vertices_omegas);
  acc += _f(0, 3, omega, vertices_omegas);
  return acc / 3;
}

static double _I_11(const double omega, const double vertices_omegas[4])
{
  const double w = _f(1, 0, omega, vertices_omegas);
  return w / 3;
}

static double _I_12(const double omega, const double vertices_omegas[4])
{
  const double w = _f(2, 0, omega, vertices_omegas);
  return w / 3;
}

static double _I_13(const double omega, const double vertices_omegas[4])
{
  const double w = _f(3, 0, omega, vertices_omegas);
  return w / 3;
}

/* Energy region 2 (omega2 < omega < omega3).  All four weights share the
 * same denominator f12*f20 + f21*f13; it is hoisted into a named local for
 * readability (the operation order matches the inline form exactly). */
static double _I_20(const double omega, const double vertices_omegas[4])
{
  const double denom =
    _f(1, 2, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) +
    _f(2, 1, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas);
  const double corr =
    _f(0, 2, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) *
    _f(1, 2, omega, vertices_omegas) / denom;

  return (_f(0, 3, omega, vertices_omegas) + corr) / 3;
}

static double _I_21(const double omega, const double vertices_omegas[4])
{
  const double denom =
    _f(1, 2, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) +
    _f(2, 1, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas);
  const double corr =
    _f(1, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) *
    _f(2, 1, omega, vertices_omegas) / denom;

  return (_f(1, 2, omega, vertices_omegas) + corr) / 3;
}

static double _I_22(const double omega, const double vertices_omegas[4])
{
  const double denom =
    _f(1, 2, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) +
    _f(2, 1, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas);
  const double corr =
    _f(2, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) *
    _f(1, 2, omega, vertices_omegas) / denom;

  return (_f(2, 1, omega, vertices_omegas) + corr) / 3;
}

static double _I_23(const double omega, const double vertices_omegas[4])
{
  const double denom =
    _f(1, 2, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) +
    _f(2, 1, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas);
  const double corr =
    _f(3, 1, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) *
    _f(2, 1, omega, vertices_omegas) / denom;

  return (_f(3, 0, omega, vertices_omegas) + corr) / 3;
}

/* Energy region 3 (omega3 < omega < omega4). */
static double _I_30(const double omega, const double vertices_omegas[4])
{
  const double w = _f(0, 3, omega, vertices_omegas);
  return w / 3;
}

static double _I_31(const double omega, const double vertices_omegas[4])
{
  const double w = _f(1, 3, omega, vertices_omegas);
  return w / 3;
}

static double _I_32(const double omega, const double vertices_omegas[4])
{
  const double w = _f(2, 3, omega, vertices_omegas);
  return w / 3;
}

static double _I_33(const double omega, const double vertices_omegas[4])
{
  double acc;

  acc = _f(3, 0, omega, vertices_omegas);
  acc += _f(3, 1, omega, vertices_omegas);
  acc += _f(3, 2, omega, vertices_omegas);
  return acc / 3;
}

/* omega above all vertices: the delta weight vanishes. */
static double _I_4(void)
{
  return 0.0;
}
GB_convert_hyper_to_sparse.c
//------------------------------------------------------------------------------ // GB_convert_hyper_to_sparse: convert a matrix from hypersparse to sparse //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // On input, the matrix may have shallow A->p and A->h content; it is safely // removed. On output, the matrix is always non-hypersparse (even if out of // memory). If the input matrix is hypersparse, it is given a new A->p that is // not shallow. If the input matrix is already non-hypersparse, nothing is // changed (and in that case A->p remains shallow on output if shallow on // input). The A->x and A->i content is not changed; it remains in whatever // shallow/non-shallow/iso property that it had on input). // If an out-of-memory condition occurs, all content of the matrix is cleared. // If the input matrix A is sparse, bitmap or full, it is unchanged. 
#include "GB.h" GB_PUBLIC GrB_Info GB_convert_hyper_to_sparse // convert hypersparse to sparse ( GrB_Matrix A, // matrix to convert to non-hypersparse GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT_MATRIX_OK (A, "A being converted from hyper to sparse", GB0) ; ASSERT (GB_ZOMBIES_OK (A)) ; ASSERT (GB_JUMBLED_OK (A)) ; ASSERT (GB_PENDING_OK (A)) ; //-------------------------------------------------------------------------- // convert A from hypersparse to sparse //-------------------------------------------------------------------------- if (GB_IS_HYPERSPARSE (A)) { //---------------------------------------------------------------------- // determine the number of threads to use //---------------------------------------------------------------------- GBURBLE ("(hyper to sparse) ") ; int64_t n = A->vdim ; GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (n, chunk, nthreads_max) ; int ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ; ntasks = GB_IMIN (ntasks, n) ; ntasks = GB_IMAX (ntasks, 1) ; //---------------------------------------------------------------------- // allocate the new Ap array, of size n+1 //---------------------------------------------------------------------- int64_t *restrict Ap_new = NULL ; size_t Ap_new_size = 0 ; Ap_new = GB_MALLOC (n+1, int64_t, &Ap_new_size) ; if (Ap_new == NULL) { // out of memory return (GrB_OUT_OF_MEMORY) ; } #ifdef GB_DEBUG // to ensure all values of Ap_new are assigned below. 
for (int64_t j = 0 ; j <= n ; j++) Ap_new [j] = -99999 ; #endif //---------------------------------------------------------------------- // get the old hyperlist //---------------------------------------------------------------------- int64_t nvec = A->nvec ; // # of vectors in Ah_old int64_t *restrict Ap_old = A->p ; // size nvec+1 int64_t *restrict Ah_old = A->h ; // size nvec int64_t nvec_nonempty = 0 ; // recompute A->nvec_nonempty int64_t anz = GB_nnz (A) ; //---------------------------------------------------------------------- // construct the new vector pointers //---------------------------------------------------------------------- int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nvec_nonempty) for (tid = 0 ; tid < ntasks ; tid++) { int64_t jstart, jend, my_nvec_nonempty = 0 ; GB_PARTITION (jstart, jend, n, tid, ntasks) ; ASSERT (0 <= jstart && jstart <= jend && jend <= n) ; // task tid computes Ap_new [jstart:jend-1] from Ap_old, Ah_old. // GB_SPLIT_BINARY_SEARCH of Ah_old [0..nvec-1] for jstart: // If found is true then Ah_old [k] == jstart. // If found is false, and nvec > 0 then // Ah_old [0 ... k-1] < jstart < Ah_old [k ... nvec-1] // Whether or not i is found, if nvec > 0 // Ah_old [0 ... k-1] < jstart <= Ah_old [k ... nvec-1] // If nvec == 0, then k == 0 and found will be false. In this // case, jstart cannot be compared with any content of Ah_old, // since Ah_old is completely empty (Ah_old [0] is invalid). int64_t k = 0, pright = nvec-1 ; bool found ; GB_SPLIT_BINARY_SEARCH (jstart, Ah_old, k, pright, found) ; ASSERT (k >= 0 && k <= nvec) ; ASSERT (GB_IMPLIES (nvec == 0, !found && k == 0)) ; ASSERT (GB_IMPLIES (found, jstart == Ah_old [k])) ; ASSERT (GB_IMPLIES (!found && k < nvec, jstart < Ah_old [k])) ; // Let jk = Ah_old [k], jlast = Ah_old [k-1], and pk = Ah_old [k]. // Then Ap_new [jlast+1:jk] must be set to pk. This must be done // for all k = 0:nvec-1. 
In addition, the last vector k=nvec-1 // must be terminated by setting Ap_new [jk+1:n-1] to Ap_old [nvec]. // A task owns the kth vector if jk is in jstart:jend-1, inclusive. // It counts all non-empty vectors that it owns. However, the task // must also set Ap_new [...] = pk for any jlast+1:jk that overlaps // jstart:jend-1, even if it does not own that particular vector k. // This happens only at the tail end of jstart:jend-1. int64_t jlast = (k == 0) ? (-1) : Ah_old [k-1] ; jlast = GB_IMAX (jstart-1, jlast) ; bool done = false ; for ( ; k <= nvec && !done ; k++) { //-------------------------------------------------------------- // get the kth vector in Ah_old, which is vector index jk. //-------------------------------------------------------------- int64_t jk = (k < nvec) ? Ah_old [k] : n ; int64_t pk = (k < nvec) ? Ap_old [k] : anz ; //-------------------------------------------------------------- // determine if this task owns jk //-------------------------------------------------------------- int64_t jfin ; if (jk >= jend) { // This is the last iteration for this task. This task // does not own the kth vector. However, it does own the // vector indices jlast+1:jend-1, and these vectors must // be handled by this task. jfin = jend - 1 ; done = true ; } else { // This task owns the kth vector, which is vector index jk. // Ap must be set to pk for all vector indices jlast+1:jk. jfin = jk ; ASSERT (k >= 0 && k < nvec && nvec > 0) ; if (pk < Ap_old [k+1]) my_nvec_nonempty++ ; } //-------------------------------------------------------------- // set Ap_new for this vector //-------------------------------------------------------------- // Ap_new [jlast+1:jk] must be set to pk. This tasks handles // the intersection of jlast+1:jk with jstart:jend-1. 
for (int64_t j = jlast+1 ; j <= jfin ; j++) { Ap_new [j] = pk ; } //-------------------------------------------------------------- // keep track of the prior vector index //-------------------------------------------------------------- jlast = jk ; } nvec_nonempty += my_nvec_nonempty ; //------------------------------------------------------------------ // no task owns Ap_new [n] so it is set by the last task //------------------------------------------------------------------ if (tid == ntasks-1) { ASSERT (jend == n) ; Ap_new [n] = anz ; } } // free the old A->p and A->h hyperlist content. // this clears A->nvec_nonempty so it must be restored below. GB_ph_free (A) ; // transplant the new vector pointers; matrix is no longer hypersparse A->p = Ap_new ; A->p_size = Ap_new_size ; A->h = NULL ; A->nvec = n ; A->nvec_nonempty = nvec_nonempty ; A->plen = n ; A->p_shallow = false ; A->h_shallow = false ; A->magic = GB_MAGIC ; ASSERT (anz == GB_nnz (A)) ; //---------------------------------------------------------------------- // A is now sparse //---------------------------------------------------------------------- ASSERT (GB_IS_SPARSE (A)) ; } //-------------------------------------------------------------------------- // A is now in sparse form (or left as full or bitmap) //-------------------------------------------------------------------------- ASSERT_MATRIX_OK (A, "A converted to sparse (or left as-is)", GB0) ; ASSERT (!GB_IS_HYPERSPARSE (A)) ; ASSERT (GB_ZOMBIES_OK (A)) ; ASSERT (GB_JUMBLED_OK (A)) ; ASSERT (GB_PENDING_OK (A)) ; return (GrB_SUCCESS) ; }
GB_bitmap_assign_C_whole_template.c
//------------------------------------------------------------------------------ // GB_bitmap_assign_C_whole_template: iterate over a bitmap matrix C //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // The #include'ing file defines a GB_CIJ_WORK macro for the body of the loop, // which operates on the entry C(iC,jC) at position Cx [pC] and Cb [pC]. The C // matrix held in bitmap form. If the mask matrix is also a bitmap matrix or // full matrix, the GB_GET_MIJ macro can compute the effective value of the // mask for the C(iC,jC) entry. // C must be bitmap or full. If M is accessed, it must also be bitmap or full. #ifndef GB_GET_MIJ #define GB_GET_MIJ(mij,pM) ; #endif { // iterate over all of C(:,:). int nthreads = GB_nthreads (cnzmax, chunk, nthreads_max) ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(+:cnvals) for (tid = 0 ; tid < nthreads ; tid++) { int64_t pC_start, pC_end, task_cnvals = 0 ; GB_PARTITION (pC_start, pC_end, cnzmax, tid, nthreads) ; for (int64_t pC = pC_start ; pC < pC_end ; pC++) { // int64_t iC = pC % cvlen ; // int64_t jC = pC / cvlen ; GB_GET_MIJ (mij, pC) ; // mij = Mask (pC) GB_CIJ_WORK (pC) ; // operate on C(iC,jC) } cnvals += task_cnvals ; } }
truedepsingleelement-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

// race condition due to a[i]= .. --> .. a[0]
// This is a DataRaceBench "yes" benchmark: the race below is intentional and
// must NOT be fixed; race detectors are expected to report it.

#include <stdlib.h>

int main (int argc, char* argv[])
{
  // array length; variable (from argv) so static analysis cannot
  // specialize it
  int len=1000;
  int i;

  if (argc>1)
    len = atoi(argv[1]);

  int a[len];  // variable-length array on the stack
  a[0] = 2;

  // Data race: iteration i==0 writes a[0] while every other iteration
  // concurrently reads a[0]; there is no ordering between them.
#pragma omp parallel for
  for (i=0;i<len;i++)
    a[i]=a[i]+a[0];

  return 0;
}
GB_unaryop__identity_bool_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_bool_fp32
// op(A') function:  GB_tran__identity_bool_fp32

// C type:   bool
// A type:   float
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    float

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = x ;

// casting
#define GB_CASTING(z, x)   \
    bool z = (bool) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over the anz entries of A; no dependence between iterations.
GrB_Info GB_unop__identity_bool_fp32
(
    bool *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unaryop_transpose.c, parameterized by the
// macros defined above.
GrB_Info GB_tran__identity_bool_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pipar.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_THREADS 4

static long steps = 100;
double step;

/*
 * SPMD estimate of pi = integral_0^1 4/(1+x^2) dx with the midpoint rule.
 * Each thread accumulates a partial sum in sum[id] over a round-robin
 * (cyclic) distribution of the steps; the partials are reduced serially.
 *
 * Fixes vs. the original: `nthreads` was declared but never assigned, and
 * the worker loop strode by the compile-time NUM_THREADS.  If the runtime
 * grants fewer threads than requested, some sum[] slots were never written
 * (uninitialized reads) and part of the integral was simply skipped.  The
 * loop now strides by the actual team size, which thread 0 records for the
 * final reduction and the report.
 */
int main(int argc, const char* argv[]) {
  double pi = 0.0;
  int nthreads = 0; /* actual team size, set by thread 0 below */
  step = 1.0 / (double)steps;
  double start, delta, sum[NUM_THREADS];

  start = omp_get_wtime();
  omp_set_num_threads(NUM_THREADS); /* a request, not a guarantee */

#pragma omp parallel
  {
    double x;
    int id, i, nthrds;

    id = omp_get_thread_num();
    nthrds = omp_get_num_threads();
    if (id == 0) {
      nthreads = nthrds; /* only one thread writes the shared count */
    }
    /* NOTE: adjacent sum[] slots share cache lines (false sharing);
     * acceptable for this teaching example. */
    for (i = id, sum[id] = 0.0; i < steps; i = i + nthrds) {
      x = (i + 0.5) * step;
      sum[id] += 4.0 / (1.0 + x * x);
    }
  }
  /* serial reduction over the partial sums actually produced */
  for (int i = 0; i < nthreads; i++) pi += sum[i] * step;

  delta = omp_get_wtime() - start;
  printf("PI = %.16g computed in %.4g seconds with %d threads.\n", pi, delta,
         nthreads);
  return 0;
}
fused_rowwise_nbitfake_conversion_ops.h
#pragma once #ifdef _OPENMP #include <omp.h> #endif #include "caffe2/core/context.h" #include "caffe2/core/logging.h" #include "caffe2/core/operator.h" #include "caffe2/operators/reducer_functors.h" #include "caffe2/utils/math.h" namespace caffe2 { namespace internal { inline bool is_little_endian() { constexpr std::int32_t kValue = 1; return reinterpret_cast<const std::uint8_t*>(&kValue)[0] == 1; } void convertfp32fp32(float* dst, const float* src, size_t N); void convertfp16fp32(float* dst, const at::Half* src, size_t N); /** * @params Xmin initial solution passed and potentiall better solution returns * @params Xmax initial solution passed and potentiall better solution returns */ void param_search_greedy( const float* X, int N, const int n_bins, // = 200, const float ratio, // = 0.16, float& Xmin, float& Xmax, int bit_rate); } // namespace internal // Fake 2/4 bit quantization // Creeates a 2/4bit rowwise quantized blob with scales and biases in fp16 // The storage format is 8 bit rowwise with scales and biases in fp32 template < int BIT_RATE, typename T, void (*convert)(float* dst, const T* src, size_t N), bool GREEDY = false> class FloatToFusedNBitFakeRowwiseQuantizedOp final : public Operator<CPUContext> { public: FloatToFusedNBitFakeRowwiseQuantizedOp(const OperatorDef& def, Workspace* ws) : Operator<CPUContext>(def, ws) {} ~FloatToFusedNBitFakeRowwiseQuantizedOp() override {} bool RunOnDevice() override { CAFFE_ENFORCE(internal::is_little_endian(), "Unsupported endianness"); const auto& input = Input(DATA_FLOAT); const auto input_rows = input.size(0); const auto input_columns = input.size(1); CAFFE_ENFORCE_EQ(input.dim(), 2, "Expect input to be a matrix"); const std::vector<int64_t> output_dimensions = {input_rows, input_columns + 8}; auto* output = Output( DATA_FUSED_SCALE_BIAS_INT8, output_dimensions, at::dtype<uint8_t>()); const auto* input_data = input.template data<T>(); auto* output_data = output->template mutable_data<uint8_t>(); const auto 
output_columns = output->size(1); if (!std::is_same<T, float>::value && !std::is_same<T, at::Half>::value) { CAFFE_THROW("Unsupported data type"); } bool use_openmp = GREEDY; #ifdef _OPENMP vector<float> tmp_vec(input_columns * (GREEDY ? omp_get_max_threads() : 1)); #else vector<float> tmp_vec(input_columns); #endif #pragma omp parallel for if (GREEDY) for (int row = 0; row < input_rows; ++row) { float* tmp = tmp_vec.data(); #ifdef _OPENMP if (GREEDY) { tmp = &tmp_vec[omp_get_thread_num() * input_columns]; } #endif convert(tmp, input_data + row * input_columns, input_columns); uint8_t* output_row = output_data + row * output_columns; float* output_row_scale_bias = reinterpret_cast<float*>(output_row + input_columns); float minimum_element = *std::min_element(tmp, tmp + input_columns); float maximum_element = *std::max_element(tmp, tmp + input_columns); if (GREEDY) { internal::param_search_greedy( tmp, input_columns, 200, 0.16, minimum_element, maximum_element, BIT_RATE); } minimum_element = static_cast<at::Half>(minimum_element); const float range = maximum_element - minimum_element; const float scale = range == 0 ? 1.0f : static_cast<float>(static_cast<at::Half>( range / static_cast<float>((1 << BIT_RATE) - 1))); const float inverse_scale = 1.0f / scale; output_row_scale_bias[0] = scale; output_row_scale_bias[1] = minimum_element; for (size_t col = 0; col < input_columns; ++col) { output_row[col] = std::max( 0, std::min<int>( std::lrintf((tmp[col] - minimum_element) * inverse_scale), (1 << BIT_RATE) - 1)); } } return true; } private: INPUT_TAGS(DATA_FLOAT); // INT8 suffix because this is a fake quantization operator whose output // type is always 8-bit regardless of BIT_RATE. OUTPUT_TAGS(DATA_FUSED_SCALE_BIAS_INT8); }; } // namespace caffe2
depend-5.c
#include <stdlib.h> __attribute__((noinline, noclone)) void f1 (int ifval) { int x = 1, y = 2, z = 3; #pragma omp parallel #pragma omp single { #pragma omp task shared (x) depend(out: x) x = 2; #pragma omp task shared (x) depend(inout: x) { if (x != 2) abort (); x = 3; } #pragma omp task shared (x) depend(inout: x) { if (x != 3) abort (); x = 4; } #pragma omp task shared (z) depend(in: z) if (z != 3) abort (); #pragma omp task shared (z) depend(in: z) if (z != 3) abort (); #pragma omp task shared (z) depend(in: z) if (z != 3) abort (); #pragma omp task shared (z) depend(in: z) if (z != 3) abort (); #pragma omp task shared (z) depend(in: z) if (z != 3) abort (); #pragma omp task shared (z) depend(in: z) if (z != 3) abort (); #pragma omp task shared (y) depend(in: y) if (y != 2) abort (); #pragma omp task shared (y) depend(in: y) if (y != 2) abort (); #pragma omp task shared (y) depend(in: y) if (y != 2) abort (); #pragma omp task shared (y) depend(in: y) if (y != 2) abort (); #pragma omp task if (ifval) shared (x, y) depend(in: x) depend(inout: y) { if (x != 4 || y != 2) abort (); y = 3; } if (ifval == 0) { /* The above if (0) task should have waited till all the tasks with x and y dependencies finish. */ if (x != 4 || y != 3) abort (); x = 5; y = 4; } #pragma omp task shared (z) depend(inout: z) { if (z != 3) abort (); z = 4; } #pragma omp task shared (z) depend(inout: z) { if (z != 4) abort (); z = 5; } #pragma omp taskwait if (x != (ifval ? 4 : 5) || y != (ifval ? 3 : 4) || z != 5) abort (); #pragma omp task if (ifval) shared (x, y) depend(in: x) depend(inout: y) { if (x != (ifval ? 4 : 5) || y != (ifval ? 3 : 4)) abort (); } } } int main () { f1 (0); f1 (1); return 0; }
normal.c
// RUN: %libomp-compile-and-run | FileCheck %s
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck --check-prefix=THREADS %s
// REQUIRES: ompt
#include "callback.h"

// OMPT event test: a 4-thread parallel region must produce matching
// parallel_begin/parallel_end, implicit_task_begin/end and barrier
// begin/end callbacks with consistent parallel/task ids.
// NOTE: every CHECK/THREADS line below is a FileCheck directive and is part
// of the test's behavior -- do not edit their text.
int main()
{
  #pragma omp parallel num_threads(4)
  {
    print_ids(0);
    print_ids(1);
  }

  // The unsorted run checks overall event ordering from the master thread.
  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:.+]]

  // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]

  // Note that we cannot ensure that the worker threads have already called barrier_end and implicit_task_end before parallel_end!

  // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]

  // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]

  // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]

  // The sorted (per-thread) run checks each thread's full event sequence:
  // first the master thread, then the three workers.
  // THREADS: 0: NULL_POINTER=[[NULL:.*$]]
  // THREADS: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=0x{{[0-f]+}}, invoker={{.*}}
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[MASTER_ID]]: level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]

  // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]

  // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]

  // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]

  return 0;
}
CC.h
/****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.6 -------------------------------------------------*/
/* date: 6/15/2017 ---------------------------------------------*/
/* authors: Ariful Azad, Aydin Buluc --------------------------*/
/****************************************************************/
/*
 Copyright (c) 2010-2017, The Regents of the University of California

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 copies of the Software, and to permit persons to whom the Software is
 furnished to do so, subject to the following conditions:

 The above copyright notice and this permission notice shall be included in
 all copies or substantial portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */

#include <mpi.h>

// These macros should be defined before stdint.h is included
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif
#include <stdint.h>

#include <sys/time.h>
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
#include <ctime>
#include <cmath>
#include "CombBLAS/CombBLAS.h"

//#define CC_TIMING 1

// States used by the Awerbuch-Shiloach connected-components driver.
#define NONSTAR 0
#define STAR 1
#define CONVERGED 2

using namespace std;

/**
 ** Connected components based on Awerbuch-Shiloach algorithm
 **/

namespace combblas {

// Semiring for SpMV in the CC algorithm: "multiply" selects the second
// operand (the vector value) and "add" takes the minimum, so the product
// computes, per row, the minimum vector entry over the row's nonzeros.
template <typename T1, typename T2>
struct Select2ndMinSR
{
    typedef typename promote_trait<T1,T2>::T_promote T_promote;
    // Identity of min: the largest representable value.
    static T_promote id(){ return std::numeric_limits<T_promote>::max(); };
    static bool returnedSAID() { return false; }
    static MPI_Op mpi_op() { return MPI_MIN; };

    static T_promote add(const T_promote& arg1, const T_promote& arg2)
    {
        return std::min(arg1, arg2);
    }

    static T_promote multiply(const T1& arg1, const T2& arg2)
    {
        return static_cast<T_promote>(arg2);
    }

    static void axpy(const T1 a, const T2& x, T_promote& y)
    {
        y = add(y, multiply(a, x));
    }
};

// Parallel exclusive prefix sum: B[i] = A[0] + ... + A[i-1].
// B[0] must be initialized by the caller (every call site sets it to 0).
// Falls back to a serial scan when cnt is small relative to the thread count.
template <class T, class I>
void omp_par_scan(T* A, T* B, I cnt)
{
    int p = omp_get_max_threads();
    if (cnt < 100*p) {
        for (I i = 1; i < cnt; i++)
            B[i] = B[i-1] + A[i-1];
        return;
    }
    I step_size = cnt/p;

    // Phase 1: independent local scans over p chunks (last chunk absorbs
    // the remainder).
    #pragma omp parallel for
    for (int i = 0; i < p; i++) {
        int start = i*step_size;
        int end = start + step_size;
        if (i == p-1) end = cnt;
        if (i != 0) B[start] = 0;
        for (I j = start+1; j < end; j++)
            B[j] = B[j-1] + A[j-1];
    }

    // Phase 2: serial scan of per-chunk totals.
    T* sum = new T[p];
    sum[0] = 0;
    for (int i = 1; i < p; i++)
        sum[i] = sum[i-1] + B[i*step_size-1] + A[i*step_size-1];

    // Phase 3: add each chunk's offset to its local scan.
    #pragma omp parallel for
    for (int i = 1; i < p; i++) {
        int start = i*step_size;
        int end = start + step_size;
        if (i == p-1) end = cnt;
        T sum_ = sum[i];
        for (I j = start; j < end; j++)
            B[j] += sum_;
    }
    delete[] sum;
}

// copied from usort so that we can select k
// an increased value of k reduces the bandwidth cost, but increases the latency cost
// this does not work when p is not power of two and a processor is not sending data,
//
// k-way hypercube-style personalized all-to-all. Each round, processes are
// partitioned into kway groups; data destined for other groups is exchanged
// with one partner per group and re-packed (each message is prefixed with
// its byte size and the original source rank), until each group has size 1.
template <typename T>
int Mpi_Alltoallv_kway(T* sbuff_, int* s_cnt_, int* sdisp_,
                       T* rbuff_, int* r_cnt_, int* rdisp_, MPI_Comm c, int kway = 2)
{
    int np, pid;
    MPI_Comm_size(c, &np);
    MPI_Comm_rank(c, &pid);
    if(np==1 || kway==1)
    {
        // Nothing to gain from the k-way scheme; defer to plain MPI.
        return MPI_Alltoallv(sbuff_, s_cnt_, sdisp_, MPIType<T>(), rbuff_, r_cnt_, rdisp_, MPIType<T>(), c);
    }
    int range[2]={0,np};

    // Per-destination byte counts, including the 2-int (size, source) header.
    std::vector<int> s_cnt(np);
    #pragma omp parallel for
    for(int i=0;i<np;i++){
        s_cnt[i]=s_cnt_[i]*sizeof(T)+2*sizeof(int);
    }
    std::vector<int> sdisp(np);
    sdisp[0]=0;
    omp_par_scan(&s_cnt[0],&sdisp[0],np);

    // Pack the outgoing data: [byte size | source pid | payload] per block.
    char* sbuff=new char[sdisp[np-1]+s_cnt[np-1]];
    #pragma omp parallel for
    for(int i=0;i<np;i++){
        ((int*)&sbuff[sdisp[i]])[0]=s_cnt[i];
        ((int*)&sbuff[sdisp[i]])[1]=pid;
        memcpy(&sbuff[sdisp[i]]+2*sizeof(int),&sbuff_[sdisp_[i]],s_cnt[i]-2*sizeof(int));
    }

    //int t_indx=0;
    int iter_cnt=0;
    while(range[1]-range[0]>1){
        iter_cnt++;
        if(kway>range[1]-range[0])
            kway=range[1]-range[0];

        // Split the current rank range [range[0], range[1]) into kway groups;
        // this process belongs to group p_class.
        std::vector<int> new_range(kway+1);
        for(int i=0;i<=kway;i++)
            new_range[i]=(range[0]*(kway-i)+range[1]*i)/kway;
        int p_class=(std::upper_bound(&new_range[0],&new_range[kway],pid)-&new_range[0]-1);
        int new_np=new_range[p_class+1]-new_range[p_class];
        int new_pid=pid-new_range[p_class];

        //Communication.
        {
            std::vector<int> r_cnt    (new_np*kway, 0);
            std::vector<int> r_cnt_ext(new_np*kway, 0);
            //Exchange send sizes.
            for(int i=0;i<kway;i++){
                MPI_Status status;
                int cmp_np=new_range[i+1]-new_range[i];
                int partner=(new_pid<cmp_np? new_range[i]+new_pid: new_range[i+1]-1) ;
                assert( (new_pid<cmp_np? true: new_range[i]+new_pid==new_range[i+1] )); //Remove this.
                MPI_Sendrecv(&s_cnt[new_range[i]-new_range[0]], cmp_np, MPI_INT, partner, 0,
                             &r_cnt[new_np*i], new_np, MPI_INT, partner, 0, c, &status);

                //Handle extra communication.
                // Only taken when group sizes are unequal (non-power-of-two np).
                if(new_pid==new_np-1 && cmp_np>new_np){
                    int partner=new_range[i+1]-1;
                    std::vector<int> s_cnt_ext(cmp_np, 0);
                    MPI_Sendrecv(&s_cnt_ext[0], cmp_np, MPI_INT, partner, 0,
                                 &r_cnt_ext[new_np*i], new_np, MPI_INT, partner, 0, c, &status);
                }
            }

            //Allocate receive buffer.
            std::vector<int> rdisp    (new_np*kway, 0);
            std::vector<int> rdisp_ext(new_np*kway, 0);
            int rbuff_size, rbuff_size_ext;
            char *rbuff, *rbuff_ext;
            {
                omp_par_scan(&r_cnt    [0],&rdisp    [0],new_np*kway);
                omp_par_scan(&r_cnt_ext[0],&rdisp_ext[0],new_np*kway);
                rbuff_size     = rdisp    [new_np*kway-1] + r_cnt    [new_np*kway-1];
                rbuff_size_ext = rdisp_ext[new_np*kway-1] + r_cnt_ext[new_np*kway-1];
                rbuff     = new char[rbuff_size    ];
                rbuff_ext = new char[rbuff_size_ext];
            }

            //Sendrecv data.
            //*
            // Pair up exchanges so that partner groups communicate in the
            // same step (schedule derived from this process's group index).
            int my_block=kway;
            while(pid<new_range[my_block]) my_block--;
            // MPI_Barrier(c);
            for(int i_=0;i_<=kway/2;i_++){
                int i1=(my_block+i_)%kway;
                int i2=(my_block+kway-i_)%kway;
                for(int j=0;j<(i_==0 || i_==kway/2?1:2);j++){
                    int i=(i_==0?i1:((j+my_block/i_)%2?i1:i2));
                    MPI_Status status;
                    int cmp_np=new_range[i+1]-new_range[i];
                    int partner=(new_pid<cmp_np? new_range[i]+new_pid: new_range[i+1]-1) ;
                    int send_dsp     =sdisp[new_range[i  ]-new_range[0]  ];
                    int send_dsp_last=sdisp[new_range[i+1]-new_range[0]-1];
                    int send_cnt     =s_cnt[new_range[i+1]-new_range[0]-1]+send_dsp_last-send_dsp;
                    // ttt=omp_get_wtime();
                    MPI_Sendrecv(&sbuff[send_dsp], send_cnt, MPI_BYTE, partner, 0,
                                 &rbuff[rdisp[new_np*i]], r_cnt[new_np*(i+1)-1]+rdisp[new_np*(i+1)-1]-rdisp[new_np*i], MPI_BYTE, partner, 0, c, &status);

                    //Handle extra communication.
                    // NOTE(review): the size exchange above guards the extra
                    // step with new_pid==new_np-1, but this uses pid==new_np-1;
                    // the inconsistency looks like a bug -- confirm against
                    // the original usort implementation.
                    if(pid==new_np-1 && cmp_np>new_np){
                        int partner=new_range[i+1]-1;
                        std::vector<int> s_cnt_ext(cmp_np, 0);
                        // NOTE(review): this receives into rbuff at rdisp_ext
                        // offsets, yet the rearrange step below reads the
                        // extra data from rbuff_ext -- this likely should be
                        // &rbuff_ext[...]; confirm before relying on the
                        // non-power-of-two path. (The only caller in this
                        // file invokes the k-way path for power-of-two np,
                        // where this branch is never taken.)
                        MPI_Sendrecv( NULL, 0, MPI_BYTE, partner, 0,
                                      &rbuff[rdisp_ext[new_np*i]], r_cnt_ext[new_np*(i+1)-1]+rdisp_ext[new_np*(i+1)-1]-rdisp_ext[new_np*i], MPI_BYTE, partner, 0, c, &status);
                    }
                }
            }

            //Rearrange received data.
            // Re-pack the received blocks by (final destination, round) so
            // the next round can forward them contiguously.
            {
                if(sbuff!=NULL) delete[] sbuff;
                sbuff=new char[rbuff_size+rbuff_size_ext];

                std::vector<int> cnt_new (2*new_np*kway, 0);
                std::vector<int> disp_new(2*new_np*kway, 0);
                for(int i=0;i<new_np;i++)
                    for(int j=0;j<kway;j++){
                        cnt_new[(i*2  )*kway+j]=r_cnt    [j*new_np+i];
                        cnt_new[(i*2+1)*kway+j]=r_cnt_ext[j*new_np+i];
                    }
                omp_par_scan(&cnt_new[0],&disp_new[0],2*new_np*kway);

                #pragma omp parallel for
                for(int i=0;i<new_np;i++)
                    for(int j=0;j<kway;j++){
                        memcpy(&sbuff[disp_new[(i*2  )*kway+j]],&rbuff    [rdisp    [j*new_np+i]], r_cnt    [j*new_np+i]);
                        memcpy(&sbuff[disp_new[(i*2+1)*kway+j]],&rbuff_ext[rdisp_ext[j*new_np+i]], r_cnt_ext[j*new_np+i]);
                    }

                //Free memory.
                if(rbuff    !=NULL) delete[] rbuff    ;
                if(rbuff_ext!=NULL) delete[] rbuff_ext;

                // Recompute per-destination counts/displacements for the
                // (smaller) group handled in the next round.
                s_cnt.clear();
                s_cnt.resize(new_np,0);
                sdisp.resize(new_np);
                for(int i=0;i<new_np;i++){
                    for(int j=0;j<2*kway;j++)
                        s_cnt[i]+=cnt_new[i*2*kway+j];
                    sdisp[i]=disp_new[i*2*kway];
                }
            }
        }
        range[0]=new_range[p_class  ];
        range[1]=new_range[p_class+1];
    }

    //Copy data to rbuff_.
    // Walk the chain of [size | src | payload] blocks and scatter each
    // payload to the caller's receive buffer by source rank.
    std::vector<char*> buff_ptr(np);
    char* tmp_ptr=sbuff;
    for(int i=0;i<np;i++){
        int& blk_size=((int*)tmp_ptr)[0];
        buff_ptr[i]=tmp_ptr;
        tmp_ptr+=blk_size;
    }
    #pragma omp parallel for
    for(int i=0;i<np;i++){
        int& blk_size=((int*)buff_ptr[i])[0];
        int& src_pid =((int*)buff_ptr[i])[1];
        assert(blk_size-2*sizeof(int)<=r_cnt_[src_pid]*sizeof(T));
        memcpy(&rbuff_[rdisp_[src_pid]],buff_ptr[i]+2*sizeof(int),blk_size-2*sizeof(int));
    }

    //Free memory.
    if(sbuff !=NULL) delete[] sbuff;
    return 1;
}

// Adaptive personalized all-to-all: picks the sparse point-to-point
// implementation when few pairs communicate, the k-way hypercube scheme
// when the process count is a power of two, and plain MPI_Alltoallv
// otherwise.
template <typename T>
int Mpi_Alltoallv(T* sbuff, int* s_cnt, int* sdisp,
                  T* rbuff, int* r_cnt, int* rdisp, MPI_Comm comm)
{
    int nprocs, rank;
    MPI_Comm_size(comm, &nprocs);
    MPI_Comm_rank(comm, &rank);

    // Count how many point-to-point messages would be needed globally.
    int commCnt = 0;
    for(int i = 0; i < nprocs; i++)
    {
        if(i==rank) continue;
        if(s_cnt[i] > 0) commCnt++;
        if(r_cnt[i] > 0) commCnt++;
    }
    int totalCommCnt = 0;
    MPI_Allreduce(&commCnt, &totalCommCnt, 1, MPI_INT, MPI_SUM, comm);

    if(totalCommCnt < 2*log2(nprocs))
    {
        return par::Mpi_Alltoallv_sparse(sbuff, s_cnt, sdisp, rbuff, r_cnt, rdisp, comm);
    }
    else if((nprocs & (nprocs - 1)) == 0) // processor count is power of 2
    {
        Mpi_Alltoallv_kway(sbuff, s_cnt, sdisp, rbuff, r_cnt, rdisp, comm);
    }
    else
    {
        return MPI_Alltoallv(sbuff, s_cnt, sdisp, MPIType<T>(), rbuff, r_cnt, rdisp, MPIType<T>(), comm);
    }
    return 1;
}

// Decide, per owner process, whether it is cheaper for that process to
// broadcast its whole local slice of `dense` than to answer the individual
// requests implied by `ri`; perform the broadcasts into bcastBuffer and
// return how many processes chose to broadcast. Processes with
// bcastBuffer[i] left empty must be queried point-to-point instead.
template <class IT, class NT>
int replicate(const FullyDistVec<IT,NT> dense, FullyDistSpVec<IT,IT> ri, vector<vector<NT>>& bcastBuffer)
{
    auto commGrid = dense.getcommgrid();
    MPI_Comm World = commGrid->GetWorld();
    int nprocs = commGrid->GetSize();
    vector<int> sendcnt (nprocs,0);
    vector<int> recvcnt (nprocs,0);
    std::vector<IT> rinum = ri.GetLocalNum();
    IT riloclen = rinum.size();

    // Count how many requests each owner would receive.
    for(IT i=0; i < riloclen; ++i)
    {
        IT locind;
        int owner = dense.Owner(rinum[i], locind);
        sendcnt[owner]++;
    }
    MPI_Alltoall(sendcnt.data(), 1, MPI_INT, recvcnt.data(), 1, MPI_INT, World);
    IT totrecv = std::accumulate(recvcnt.begin(),recvcnt.end(), static_cast<IT>(0));

    double broadcast_cost = dense.LocArrSize() * log2(nprocs); // bandwidth cost
    IT bcastsize = 0;
    vector<IT> bcastcnt(nprocs,0);
    int nbcast = 0;
    // Broadcast only if answering the requests would cost more.
    if(broadcast_cost < totrecv)
    {
        bcastsize = dense.LocArrSize();
    }
    MPI_Allgather(&bcastsize, 1, MPIType<IT>(), bcastcnt.data(), 1, MPIType<IT>(), World);
    for(int i=0; i<nprocs; i++)
    {
        if(bcastcnt[i]>0) nbcast++;
    }

    if(nbcast > 0)
    {
        MPI_Request* requests = new MPI_Request[nbcast];
        assert(requests);
        MPI_Status* statuses = new MPI_Status[nbcast];
        assert(statuses);
        int ibcast = 0;
        const NT * arr = dense.GetLocArr();
        for(int i=0; i<nprocs; i++)
        {
            if(bcastcnt[i]>0)
            {
                bcastBuffer[i].resize(bcastcnt[i]);
                // Roots fill their buffer before broadcasting; non-roots
                // overwrite this copy with the root's data.
                std::copy(arr, arr+bcastcnt[i], bcastBuffer[i].begin());
                MPI_Ibcast(bcastBuffer[i].data(), bcastcnt[i], MPIType<NT>(), i, World, &requests[ibcast++]);
            }
        }
        MPI_Waitall(nbcast, requests, statuses);
        delete [] requests;
        delete [] statuses;
    }
    return nbcast;
}

// SubRef using a sparse vector
// given a dense vector dv and a sparse vector sv
// sv_out[i]=dv[sv[i]] for all nonzero index i in sv
// return sv_out
// If sv has repeated entries, many processes are requesting same entries of dv from the same processes
// (usually from the low rank processes in LACC)
// In this case, it may be beneficial to broadcast some entries of dv so that dv[sv[i]] can be obtained locally.
// This logic is implemented in this function: replicate(dense, ri, bcastBuffer)
template <class IT, class NT>
FullyDistSpVec<IT,NT> Extract (const FullyDistVec<IT,NT> dense, FullyDistSpVec<IT,IT> ri)
{
#ifdef CC_TIMING
    double ts = MPI_Wtime();
    std::ostringstream outs;
    outs.str("");
    outs.clear();
    outs<< " Extract timing: ";
#endif
    auto commGrid = ri.getcommgrid();
    MPI_Comm World = commGrid->GetWorld();
    int nprocs = commGrid->GetSize();

    if(!(commGrid == dense.getcommgrid()))
    {
        std::cout << "Grids are not comparable for dense vector subsref" << std::endl;
        return FullyDistSpVec<IT,NT>();
    }

    // Owners for which broadcasting is cheaper fill bcastBuffer; their
    // entries are then resolved locally below.
    vector<vector<NT>> bcastBuffer(nprocs);
#ifdef CC_TIMING
    double t1 = MPI_Wtime();
#endif
    int nbcast = replicate(dense, ri, bcastBuffer);
#ifdef CC_TIMING
    double bcast = MPI_Wtime() - t1;
    outs << "bcast ( " << nbcast << " ): " << bcast << " ";
#endif

    std::vector< std::vector< IT > > data_req(nprocs);
    std::vector< std::vector< IT > > revr_map(nprocs); // to put the incoming data to the correct location
    const NT * arr = dense.GetLocArr();
    std::vector<IT> rinum = ri.GetLocalNum();
    IT riloclen = rinum.size();
    std::vector<NT> num(riloclen); // final output

    // Resolve each request locally from a broadcast buffer when possible;
    // otherwise queue it for the point-to-point exchange.
    for(IT i=0; i < riloclen; ++i)
    {
        IT locind;
        int owner = dense.Owner(rinum[i], locind);
        if(bcastBuffer[owner].size() == 0)
        {
            data_req[owner].push_back(locind);
            revr_map[owner].push_back(i);
        }
        else
        {
            num[i] = bcastBuffer[owner][locind];
        }
    }

    int * sendcnt = new int[nprocs];
    int * sdispls = new int[nprocs];
    for(int i=0; i<nprocs; ++i)
        sendcnt[i] = (int) data_req[i].size();
    int * rdispls = new int[nprocs];
    int * recvcnt = new int[nprocs];
#ifdef CC_TIMING
    t1 = MPI_Wtime();
#endif
    MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World); // share the request counts
#ifdef CC_TIMING
    double all2ll1 = MPI_Wtime() - t1;
    outs << "all2ll1: " << all2ll1 << " ";
#endif
    sdispls[0] = 0;
    rdispls[0] = 0;
    for(int i=0; i<nprocs-1; ++i)
    {
        sdispls[i+1] = sdispls[i] + sendcnt[i];
        rdispls[i+1] = rdispls[i] + recvcnt[i];
    }
    IT totsend = std::accumulate(sendcnt,sendcnt+nprocs, static_cast<IT>(0));
    IT totrecv = std::accumulate(recvcnt,recvcnt+nprocs, static_cast<IT>(0));

    IT * sendbuf = new IT[totsend];
    for(int i=0; i<nprocs; ++i)
    {
        std::copy(data_req[i].begin(), data_req[i].end(), sendbuf+sdispls[i]);
        std::vector<IT>().swap(data_req[i]); // release memory eagerly
    }
    IT * reversemap = new IT[totsend];
    for(int i=0; i<nprocs; ++i)
    {
        std::copy(revr_map[i].begin(), revr_map[i].end(), reversemap+sdispls[i]); // reversemap array is unique
        std::vector<IT>().swap(revr_map[i]);
    }

    IT * recvbuf = new IT[totrecv];
#ifdef CC_TIMING
    t1 = MPI_Wtime();
#endif
    Mpi_Alltoallv(sendbuf, sendcnt, sdispls, recvbuf, recvcnt, rdispls, World);
#ifdef CC_TIMING
    double all2ll2 = MPI_Wtime() - t1;
    outs << "all2ll2: " << all2ll2 << " ";
#endif
    delete [] sendbuf;

    // access requested data
    NT * databack = new NT[totrecv];
#ifdef THREADED
#pragma omp parallel for
#endif
    for(int i=0; i<totrecv; ++i)
        databack[i] = arr[recvbuf[i]];
    delete [] recvbuf;

    // communicate requested data
    NT * databuf = new NT[totsend];
    // the response counts are the same as the request counts
#ifdef CC_TIMING
    t1 = MPI_Wtime();
#endif
    //Mpi_Alltoallv_sparse(databack, recvcnt, rdispls,databuf, sendcnt, sdispls, World);
    Mpi_Alltoallv(databack, recvcnt, rdispls,databuf, sendcnt, sdispls, World);
#ifdef CC_TIMING
    double all2ll3 = MPI_Wtime() - t1;
    outs << "all2ll3: " << all2ll3 << " ";
#endif

    // Create the output from databuf
    for(int i=0; i<totsend; ++i)
        num[reversemap[i]] = databuf[i];
    DeleteAll(rdispls, recvcnt, databack);
    DeleteAll(sdispls, sendcnt, databuf,reversemap);

    std::vector<IT> ind = ri.GetLocalInd ();
    IT globallen = ri.TotalLength();
    FullyDistSpVec<IT, NT> indexed(commGrid, globallen, ind, num, true, true);
#ifdef CC_TIMING
    double total = MPI_Wtime() - ts;
    outs << "others: " << total - (bcast + all2ll1 + all2ll2 + all2ll3) << " ";
    outs<< endl;
    SpParHelper::Print(outs.str());
#endif
    return indexed;
}

// Decide, per destination process, whether it is cheaper to MIN-reduce that
// process's whole local range (via nonblocking reduces into reduceBuffer)
// than to send the individual scattered updates; returns the number of
// processes that chose reduction. Destinations with reduceBuffer[i] left
// empty must be updated point-to-point by the caller.
template <class IT, class NT>
int ReduceAssign(FullyDistSpVec<IT,IT>& ind, FullyDistSpVec<IT,NT>& val, vector<vector<NT>>& reduceBuffer, NT MAX_FOR_REDUCE)
{
    auto commGrid = ind.getcommgrid();
    MPI_Comm World = commGrid->GetWorld();
    int nprocs = commGrid->GetSize();
    int myrank;
    MPI_Comm_rank(World,&myrank);
    vector<int> sendcnt (nprocs,0);
    vector<int> recvcnt (nprocs);
    std::vector<std::vector<IT>> indBuf(nprocs);
    std::vector<std::vector<NT>> valBuf(nprocs);
    std::vector<IT> indices = ind.GetLocalNum();
    std::vector<NT> values = val.GetLocalNum();
    IT riloclen = indices.size();

    // Bucket the (index, value) updates by owner.
    for(IT i=0; i < riloclen; ++i)
    {
        IT locind;
        int owner = ind.Owner(indices[i], locind);
        indBuf[owner].push_back(locind);
        valBuf[owner].push_back(values[i]);
        sendcnt[owner]++;
    }
    MPI_Alltoall(sendcnt.data(), 1, MPI_INT, recvcnt.data(), 1, MPI_INT, World);
    IT totrecv = std::accumulate(recvcnt.begin(),recvcnt.end(), static_cast<IT>(0));

    double reduceCost = ind.MyLocLength() * log2(nprocs); // bandwidth cost
    IT reducesize = 0;
    vector<IT> reducecnt(nprocs,0);
    int nreduce = 0;
    if(reduceCost < totrecv)
    {
        reducesize = ind.MyLocLength();
    }
    MPI_Allgather(&reducesize, 1, MPIType<IT>(), reducecnt.data(), 1, MPIType<IT>(), World);
    for(int i=0; i<nprocs; ++i)
    {
        if(reducecnt[i]>0) nreduce++;
    }

    if(nreduce > 0)
    {
        MPI_Request* requests = new MPI_Request[nreduce];
        assert(requests);
        MPI_Status* statuses = new MPI_Status[nreduce];
        assert(statuses);
        int ireduce = 0;
        for(int i=0; i<nprocs; ++i)
        {
            if(reducecnt[i]>0)
            {
                // Seed with the identity of MIN and fold in the local
                // updates before reducing across all processes.
                reduceBuffer[i].resize(reducecnt[i], MAX_FOR_REDUCE); // this is specific to LACC
                for(int j=0; j<sendcnt[i]; j++)
                    reduceBuffer[i][indBuf[i][j]] = std::min(reduceBuffer[i][indBuf[i][j]], valBuf[i][j]);
                if(myrank==i)
                    MPI_Ireduce(MPI_IN_PLACE, reduceBuffer[i].data(), reducecnt[i], MPIType<NT>(), MPI_MIN, i, World, &requests[ireduce++]);
                else
                    MPI_Ireduce(reduceBuffer[i].data(), NULL, reducecnt[i], MPIType<NT>(), MPI_MIN, i, World, &requests[ireduce++]);
            }
        }
        MPI_Waitall(nreduce, requests, statuses);
        //MPI_Barrier(World);
        delete [] requests;
        delete [] statuses;
    }
    return nreduce;
}

// for fixed value
// Overload of ReduceAssign where every update carries the same value `val`
// instead of a per-entry value vector; otherwise identical logic.
template <class IT, class NT>
int ReduceAssign(FullyDistSpVec<IT,IT>& ind, NT val, vector<vector<NT>>& reduceBuffer, NT MAX_FOR_REDUCE)
{
    auto commGrid = ind.getcommgrid();
    MPI_Comm World = commGrid->GetWorld();
    int nprocs = commGrid->GetSize();
    int myrank;
    MPI_Comm_rank(World,&myrank);
    vector<int> sendcnt (nprocs,0);
    vector<int> recvcnt (nprocs);
    std::vector<std::vector<IT>> indBuf(nprocs);
    std::vector<IT> indices = ind.GetLocalNum();
    IT riloclen = indices.size();

    // Bucket the target indices by owner.
    for(IT i=0; i < riloclen; ++i)
    {
        IT locind;
        int owner = ind.Owner(indices[i], locind);
        indBuf[owner].push_back(locind);
        sendcnt[owner]++;
    }
    MPI_Alltoall(sendcnt.data(), 1, MPI_INT, recvcnt.data(), 1, MPI_INT, World);
    IT totrecv = std::accumulate(recvcnt.begin(),recvcnt.end(), static_cast<IT>(0));

    double reduceCost = ind.MyLocLength() * log2(nprocs); // bandwidth cost
    IT reducesize = 0;
    vector<IT> reducecnt(nprocs,0);
    int nreduce = 0;
    if(reduceCost < totrecv)
    {
        reducesize = ind.MyLocLength();
    }
    MPI_Allgather(&reducesize, 1, MPIType<IT>(), reducecnt.data(), 1, MPIType<IT>(), World);
    for(int i=0; i<nprocs; ++i)
    {
        if(reducecnt[i]>0) nreduce++;
    }

    if(nreduce > 0)
    {
        MPI_Request* requests = new MPI_Request[nreduce];
        assert(requests);
        MPI_Status* statuses = new MPI_Status[nreduce];
        assert(statuses);
        int ireduce = 0;
        for(int i=0; i<nprocs; ++i)
        {
            if(reducecnt[i]>0)
            {
                reduceBuffer[i].resize(reducecnt[i], MAX_FOR_REDUCE); // this is specific to LACC
                for(int j=0; j<sendcnt[i]; j++)
                    reduceBuffer[i][indBuf[i][j]] = val;
                if(myrank==i)
                    MPI_Ireduce(MPI_IN_PLACE, reduceBuffer[i].data(), reducecnt[i], MPIType<NT>(), MPI_MIN, i, World, &requests[ireduce++]);
                else
                    MPI_Ireduce(reduceBuffer[i].data(), NULL, reducecnt[i], MPIType<NT>(), MPI_MIN, i, World, &requests[ireduce++]);
            }
        }
        MPI_Waitall(nreduce, requests, statuses);
        //MPI_Barrier(World);
        delete [] requests;
        delete [] statuses;
    }
    return nreduce;
}

// given two sparse vectors sv and val
// sv_out[sv[i]] = val[i] for all nonzero index i in sv, where sv_out is the output sparse vector
// If sv has repeated entries, a process may receive the same values of sv from different processes
// In this case, it may be beneficial to reduce some entries of sv so that sv_out[sv[i]] can be updated locally.
// This logic is implemented in this function: ReduceAssign
// Scatter-style assignment: builds sv_out with sv_out[ind[i]] = val[i].
// Entries destined for processes that chose dense reduction (non-empty
// reduceBuffer slot) are delivered via ReduceAssign's MIN-reduction; the rest
// travel through a conventional all-to-all exchange.
// Collective over ind's communicator; returns an empty vector on size mismatch
// or out-of-range index.
template <class IT, class NT>
FullyDistSpVec<IT,NT> Assign(FullyDistSpVec<IT,IT>& ind, FullyDistSpVec<IT,NT>& val)
{
    IT ploclen = ind.getlocnnz();
    if(ploclen != val.getlocnnz())
    {
        SpParHelper::Print("Assign error: Index and value vectors have different size !!!\n");
        return FullyDistSpVec<IT,NT>(ind.getcommgrid());
    }
    IT globallen = ind.TotalLength();
    IT maxInd = ind.Reduce(maximum<IT>(), (IT) 0 ) ;
    if(maxInd >= globallen)
    {
        std::cout << "At least one requested index is larger than the global length" << std::endl;
        return FullyDistSpVec<IT,NT>(ind.getcommgrid());
    }
#ifdef CC_TIMING
    double ts = MPI_Wtime();
    std::ostringstream outs;
    outs.str("");
    outs.clear();
    outs<< " Assign timing: ";
#endif
    auto commGrid = ind.getcommgrid();
    MPI_Comm World = commGrid->GetWorld();
    int nprocs = commGrid->GetSize();
    int * rdispls = new int[nprocs+1];
    int * recvcnt = new int[nprocs];
    int * sendcnt = new int[nprocs](); // initialize to 0
    int * sdispls = new int[nprocs+1];
    vector<vector<NT>> reduceBuffer(nprocs);
#ifdef CC_TIMING
    double t1 = MPI_Wtime();
#endif
    // sentinel larger than any valid value (values are vertex ids < globallen)
    NT MAX_FOR_REDUCE = static_cast<NT>(globallen);
    int nreduce = ReduceAssign(ind, val, reduceBuffer, MAX_FOR_REDUCE);
#ifdef CC_TIMING
    double reduce = MPI_Wtime() - t1;
    outs << "reduce (" << nreduce << "): " << reduce << " ";
#endif
    std::vector<std::vector<IT>> indBuf(nprocs);
    std::vector<std::vector<NT>> valBuf(nprocs);
    std::vector<IT> indices = ind.GetLocalNum();
    std::vector<NT> values = val.GetLocalNum();
    IT riloclen = indices.size();
    for(IT i=0; i < riloclen; ++i)
    {
        IT locind;
        int owner = ind.Owner(indices[i], locind);
        // only send to owners that did NOT opt into dense reduction;
        // the others were already served inside ReduceAssign
        if(reduceBuffer[owner].size() == 0)
        {
            indBuf[owner].push_back(locind);
            valBuf[owner].push_back(values[i]);
            sendcnt[owner]++;
        }
    }
#ifdef CC_TIMING
    t1 = MPI_Wtime();
#endif
    MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);
#ifdef CC_TIMING
    double all2ll1 = MPI_Wtime() - t1;
    outs << "all2ll1: " << all2ll1 << " ";
#endif
    // prefix sums -> send/recv displacements
    sdispls[0] = 0;
    rdispls[0] = 0;
    for(int i=0; i<nprocs; ++i)
    {
        sdispls[i+1] = sdispls[i] + sendcnt[i];
        rdispls[i+1] = rdispls[i] + recvcnt[i];
    }
    IT totsend = sdispls[nprocs];
    IT totrecv = rdispls[nprocs];
    vector<IT> sendInd(totsend);
    vector<NT> sendVal(totsend);
    for(int i=0; i<nprocs; ++i)
    {
        std::copy(indBuf[i].begin(), indBuf[i].end(), sendInd.begin()+sdispls[i]);
        std::vector<IT>().swap(indBuf[i]);   // free per-destination buffer eagerly
        std::copy(valBuf[i].begin(), valBuf[i].end(), sendVal.begin()+sdispls[i]);
        std::vector<NT>().swap(valBuf[i]);
    }
    vector<IT> recvInd(totrecv);
    vector<NT> recvVal(totrecv);
#ifdef CC_TIMING
    t1 = MPI_Wtime();
#endif
    Mpi_Alltoallv(sendInd.data(), sendcnt, sdispls, recvInd.data(), recvcnt, rdispls, World);
    //MPI_Alltoallv(sendInd.data(), sendcnt, sdispls, MPIType<IT>(), recvInd.data(), recvcnt, rdispls, MPIType<IT>(), World);
#ifdef CC_TIMING
    double all2ll2 = MPI_Wtime() - t1;
    outs << "all2ll2: " << all2ll2 << " ";
#endif
#ifdef CC_TIMING
    t1 = MPI_Wtime();
#endif
    Mpi_Alltoallv(sendVal.data(), sendcnt, sdispls, recvVal.data(), recvcnt, rdispls, World);
#ifdef CC_TIMING
    double all2ll3 = MPI_Wtime() - t1;
    outs << "all2ll3: " << all2ll3 << " ";
#endif
    DeleteAll(sdispls, rdispls, sendcnt, recvcnt);
    int myrank;
    MPI_Comm_rank(World,&myrank);
    // harvest results of my own dense reduction: any slot below the sentinel
    // received at least one real value
    if(reduceBuffer[myrank].size()>0)
    {
        //cout << myrank << " : " << recvInd.size() << endl;
        for(int i=0; i<reduceBuffer[myrank].size(); i++)
        {
            if(reduceBuffer[myrank][i] < MAX_FOR_REDUCE)
            {
                recvInd.push_back(i);
                recvVal.push_back(reduceBuffer[myrank][i]);
            }
        }
    }
    FullyDistSpVec<IT, NT> indexed(commGrid, globallen, recvInd, recvVal, false, false);
#ifdef CC_TIMING
    double total = MPI_Wtime() - ts;
    outs << "others: " << total - (reduce + all2ll1 + all2ll2 + all2ll3) << " ";
    outs<< endl;
    SpParHelper::Print(outs.str());
#endif
    return indexed;
}

// given a sparse vector sv
// sv_out[sv[i]] = val for all nonzero index i in sv, where sv_out is the output sparse vector
// If sv has repeated entries, a process may receive the same values of sv from different processes
// In this case, it
// may be beneficial to reduce some entries of sv so that sv_out[sv[i]] can be updated locally.
// This logic is implemented in this function: ReduceAssign
// Fixed-value overload: sv_out[ind[i]] = val for every nonzero of ind.
// Same hybrid (dense-reduction + all-to-all) strategy as the vector-valued
// Assign above, but only indices need to be communicated since the value is a
// single scalar. Collective over ind's communicator.
template <class IT, class NT>
FullyDistSpVec<IT,NT> Assign (FullyDistSpVec<IT,IT>& ind, NT val)
{
    IT globallen = ind.TotalLength();
    IT maxInd = ind.Reduce(maximum<IT>(), (IT) 0 ) ;
    if(maxInd >= globallen)
    {
        std::cout << "At least one requested index is larger than the global length" << std::endl;
        return FullyDistSpVec<IT,NT>(ind.getcommgrid());
    }
#ifdef CC_TIMING
    double ts = MPI_Wtime();
    std::ostringstream outs;
    outs.str("");
    outs.clear();
    outs<< " Assign timing: ";
#endif
    auto commGrid = ind.getcommgrid();
    MPI_Comm World = commGrid->GetWorld();
    int nprocs = commGrid->GetSize();
    int * rdispls = new int[nprocs+1];
    int * recvcnt = new int[nprocs];
    int * sendcnt = new int[nprocs](); // initialize to 0
    int * sdispls = new int[nprocs+1];
    vector<vector<NT>> reduceBuffer(nprocs);
#ifdef CC_TIMING
    double t1 = MPI_Wtime();
#endif
    // sentinel larger than any valid value
    NT MAX_FOR_REDUCE = static_cast<NT>(globallen);
    int nreduce = ReduceAssign(ind, val, reduceBuffer, MAX_FOR_REDUCE);
#ifdef CC_TIMING
    double reduce = MPI_Wtime() - t1;
    outs << "reduce ( " << nreduce << " ): " << reduce << " ";
#endif
    std::vector<std::vector<IT>> indBuf(nprocs);
    std::vector<IT> indices = ind.GetLocalNum();
    IT riloclen = indices.size();
    for(IT i=0; i < riloclen; ++i)
    {
        IT locind;
        int owner = ind.Owner(indices[i], locind);
        // skip owners served by the dense reduction path
        if(reduceBuffer[owner].size() == 0)
        {
            indBuf[owner].push_back(locind);
            sendcnt[owner]++;
        }
    }
#ifdef CC_TIMING
    t1 = MPI_Wtime();
#endif
    MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);
#ifdef CC_TIMING
    double all2ll1 = MPI_Wtime() - t1;
    outs << "all2ll1: " << all2ll1 << " ";
#endif
    sdispls[0] = 0;
    rdispls[0] = 0;
    for(int i=0; i<nprocs; ++i)
    {
        sdispls[i+1] = sdispls[i] + sendcnt[i];
        rdispls[i+1] = rdispls[i] + recvcnt[i];
    }
    IT totsend = sdispls[nprocs];
    IT totrecv = rdispls[nprocs];
    vector<IT> sendInd(totsend);
    for(int i=0; i<nprocs; ++i)
    {
        std::copy(indBuf[i].begin(), indBuf[i].end(), sendInd.begin()+sdispls[i]);
        std::vector<IT>().swap(indBuf[i]);
    }
    vector<IT> recvInd(totrecv);
#ifdef CC_TIMING
    t1 = MPI_Wtime();
#endif
    // only indices are exchanged; the value is the same scalar everywhere
    Mpi_Alltoallv(sendInd.data(), sendcnt, sdispls, recvInd.data(), recvcnt, rdispls, World);
    //MPI_Alltoallv(sendInd.data(), sendcnt, sdispls, MPIType<IT>(), recvInd.data(), recvcnt, rdispls, MPIType<IT>(), World);
#ifdef CC_TIMING
    double all2ll2 = MPI_Wtime() - t1;
    outs << "all2ll2: " << all2ll2 << " ";
    outs << "all2ll3: " << 0 << " ";
#endif
    DeleteAll(sdispls, rdispls, sendcnt, recvcnt);
    int myrank;
    MPI_Comm_rank(World,&myrank);
    vector<NT> recvVal(totrecv);
    if(reduceBuffer[myrank].size()>0)
    {
        //cout << myrank << " : " << recvInd.size() << endl;
        for(int i=0; i<reduceBuffer[myrank].size(); i++)
        {
            if(reduceBuffer[myrank][i] < MAX_FOR_REDUCE)
            {
                recvInd.push_back(i);
                recvVal.push_back(val);
            }
        }
    }
    FullyDistSpVec<IT, NT> indexed(commGrid, globallen, recvInd, recvVal, false, false);
#ifdef CC_TIMING
    double total = MPI_Wtime() - ts;
    outs << "others: " << total - (reduce + all2ll1 + all2ll2) << " ";
    outs<< endl;
    SpParHelper::Print(outs.str());
#endif
    return indexed;
}

// special starcheck after conditional and unconditional hooking
// Marks hooked vertices (and, when star-to-star hooking is possible, their
// parents) as NONSTAR, then propagates star status from parents so that every
// vertex whose parent became a nonstar is also a nonstar.
template <typename IT, typename NT, typename DER>
void StarCheckAfterHooking(const SpParMat<IT, NT, DER>& A, FullyDistVec<IT, IT>& parent, FullyDistVec<IT, short>& star, FullyDistSpVec<IT, IT> condhooks, bool isStar2StarHookPossible)
{
    // hooks are nonstars
    star.EWiseApply(condhooks, [](short isStar, IT x){ return static_cast<short>(NONSTAR); }, false, static_cast<IT>(NONSTAR));
    if(isStar2StarHookPossible)
    {
        // this is not needed in the first iteration see the complicated proof in the paper
        // parents of hooks are nonstars
        // needed only after conditional hooking because in that case star can hook to a star
        FullyDistSpVec<IT, short> pNonStar= Assign(condhooks, NONSTAR);
        star.Set(pNonStar);
    }
    // star(parent)
    // If I am a star, I would like to know the star information of my parent
    // children of hooks and parents of hooks are nonstars
    // NOTE: they are not needed in the first iteration
    FullyDistSpVec<IT, short> spStars(star, [](short isStar){ return isStar == STAR; });
    FullyDistSpVec<IT, IT> parentOfStars = EWiseApply<IT>(spStars, parent, [](short isStar, IT p){ return p; }, [](short isStar, IT p){ return true; }, false, static_cast<short>(0));
    FullyDistSpVec<IT, short> isParentStar = Extract(star, parentOfStars);
    star.Set(isParentStar);
}

/*
// In iteration 1: "stars" has both vertices belongihg to stars and nonstars (no converged)
// we only process nonstars and identify starts from them
// After iteration 1: "stars" has vertices belongihg to converged and nonstars (no stars)
// we only process nonstars and identify starts from them
template <typename IT>
void StarCheck(FullyDistVec<IT, IT>& parents, FullyDistVec<IT,short>& stars)
{
    // this is done here so that in the first iteration, we don't process STAR vertices
    FullyDistSpVec<IT,short> nonStars(stars, [](short isStar){return isStar==NONSTAR;});
    // initialize all nonstars to stars
    stars.Apply([](short isStar){return isStar==NONSTAR?
STAR: isStar;});
    // identify vertices at level >= 2 (grandchildren of roots)
    FullyDistSpVec<IT, IT> pOfNonStars = EWiseApply<IT>(nonStars, parents, [](short isStar, IT p){return p;}, [](short isStar, IT p){return true;}, false, static_cast<short>(0));
    FullyDistSpVec<IT,IT> gpOfNonStars = Extract(parents, pOfNonStars);
    FullyDistSpVec<IT,short> keptNonStars = EWiseApply<short>(pOfNonStars, gpOfNonStars, [](IT p, IT gp){return static_cast<short>(NONSTAR);}, [](IT p, IT gp){return p!=gp;}, false, false, static_cast<IT>(0), static_cast<IT>(0));
    stars.Set(keptNonStars); // setting level > 2 vertices as nonstars
    // identify grand parents of kept nonstars
    FullyDistSpVec<IT,IT> gpOfKeptNonStars = EWiseApply<IT>(pOfNonStars, gpOfNonStars, [](IT p, IT gp){return gp;}, [](IT p, IT gp){return p!=gp;}, false, false, static_cast<IT>(0), static_cast<IT>(0));
    //FullyDistSpVec<IT, short> fixedNS = gpOfKeptNonStars;
    //fixedNS = NONSTAR;
    FullyDistSpVec<IT, short> gpNonStar= Assign(gpOfKeptNonStars, NONSTAR);
    stars.Set(gpNonStar);
    // remaining vertices: level-1 leaves of nonstars and any vertices in previous stars (iteration 1 only)
    FullyDistSpVec<IT,short> spStars(stars, [](short isStar){return isStar==STAR;}); // further optimization can be done to remove previous stars
    FullyDistSpVec<IT, IT> pOfStars = EWiseApply<IT>(spStars, parents, [](short isStar, IT p){return p;}, [](short isStar, IT p){return true;}, false, static_cast<short>(0));
    FullyDistSpVec<IT,short> isParentStar = Extract(stars, pOfStars);
    stars.Set(isParentStar);
}
*/

// In iteration>1:
// We have only CONVERGED or NONSTAR vertices
// some of the NONSTAR vertices may become STAR in the last shortcut operation
// We would like to identify those new stars
// In iteration 1:
// we have STAR and NONSTAR vertices
// every hooked vertex is marked as NONSTARs
// roots are marked as STARs (including singletons)
template <typename IT>
void StarCheck(FullyDistVec<IT, IT>& parents, FullyDistVec<IT,short>& stars)
{
    // this is done here so that in the first iteration, we don't process STAR vertices
    // all current nonstars
    FullyDistSpVec<IT,short> nonStars(stars, [](short isStar){return isStar==NONSTAR;});
    // initialize all nonstars to stars (then demote the ones that fail the checks below)
    stars.Apply([](short isStar){return isStar==NONSTAR? STAR: isStar;});
    // parents of all current nonstars
    FullyDistSpVec<IT, IT> pOfNonStars = EWiseApply<IT>(nonStars, parents, [](short isStar, IT p){return p;}, [](short isStar, IT p){return true;}, false, static_cast<short>(0));
    // parents of all current nonstars indexed by parent
    // any vertex with a child should be here
    // leaves are not present as indices, but roots are present
    FullyDistSpVec<IT,short> pOfNonStarsIdx = Assign(pOfNonStars, NONSTAR);
    // copy parent information (the values are grandparents)
    FullyDistSpVec<IT,IT> gpOfNonStars_pindexed = EWiseApply<IT>(pOfNonStarsIdx, parents, [](short isStar, IT p){return p;}, [](short isStar, IT p){return true;}, false, static_cast<short>(0));
    // identify if they are parents/grandparents of a vertex with level > 2
    FullyDistSpVec<IT,IT> temp = gpOfNonStars_pindexed;
    temp.setNumToInd();
    gpOfNonStars_pindexed = EWiseApply<IT>(temp, gpOfNonStars_pindexed, [](IT p, IT gp){return gp;}, [](IT p, IT gp){return p!=gp;}, false, false, static_cast<IT>(0), static_cast<IT>(0));
    // index has parents of vertices with level > 2
    // value has grand parents of vertices with level > 2
    // update parents
    // All vertices (except the root and leaves) in a non-star tree will be updated
    stars.EWiseApply(gpOfNonStars_pindexed, [](short isStar, IT idx){return static_cast<short>(NONSTAR);}, false, static_cast<IT>(NONSTAR));
    // now everything is updated except the root and leaves of nonstars
    // identify roots (indexed by level-1 vertices)
    FullyDistSpVec<IT,IT> rootsOfNonStars = EWiseApply<IT>(pOfNonStars, stars, [](IT p, short isStar){return p;}, [](IT p, short isStar){return isStar==NONSTAR;}, false, static_cast<IT>(0));
    FullyDistSpVec<IT,short> rootsOfNonStarsIdx = Assign(rootsOfNonStars, NONSTAR);
    stars.Set( rootsOfNonStarsIdx);
    // remaining vertices
    // they must be stars (created after the shortcut) or level-1 leaves of a non-star
    FullyDistSpVec<IT,IT> pOflevel1V = EWiseApply<IT>(nonStars, stars, [](short s, short isStar){return static_cast<IT> (s);}, [](short s, short isStar){return isStar==STAR;}, false, static_cast<short>(0));
    pOflevel1V = EWiseApply<IT>(pOflevel1V, parents, [](IT s, IT p){return p;}, [](IT s, IT p){return true;}, false, static_cast<IT>(0));
    FullyDistSpVec<IT,short> isParentStar = Extract(stars, pOflevel1V);
    stars.Set(isParentStar);
}

// Conditional hooking: a star root may hook onto a smaller neighboring parent
// (p > mnp guard keeps the parent function monotone and cycle-free).
// Returns the sparse vector of hooked vertices; updates 'parent' in place.
template <typename IT, typename NT, typename DER>
FullyDistSpVec<IT, IT> ConditionalHook(const SpParMat<IT, NT, DER>& A, FullyDistVec<IT, IT>& parent, FullyDistVec<IT, short> stars, int iteration)
{
#ifdef CC_TIMING
    double t1 = MPI_Wtime();
#endif
    FullyDistVec<IT, IT> minNeighborparent (A.getcommgrid());
    minNeighborparent = SpMV<Select2ndMinSR<NT, IT>>(A, parent); // value is the minimum of all neighbors' parents
#ifdef CC_TIMING
    double tspmv = MPI_Wtime() - t1;
#endif
    // only vertices currently in stars are allowed to hook conditionally
    FullyDistSpVec<IT,IT> hooksMNP(stars, [](short isStar){ return isStar == STAR; });
    hooksMNP = EWiseApply<IT>(hooksMNP, minNeighborparent, [](IT x, IT mnp){ return mnp; }, [](IT x, IT mnp){ return true; }, false, static_cast<IT> (0));
    // keep only hooks that strictly decrease the parent (avoids cycles)
    hooksMNP = EWiseApply<IT>(hooksMNP, parent, [](IT mnp, IT p){ return mnp; }, [](IT mnp, IT p){ return p > mnp; }, false, static_cast<IT> (0));
    FullyDistSpVec<IT, IT> finalhooks(A.getcommgrid());
    if(iteration == 1)
    {
        finalhooks = hooksMNP;
    }
    else
    {
        // after iteration 1 the hook must be applied at the root: reindex by parent
        FullyDistSpVec<IT,IT> hooksP = EWiseApply<IT>(hooksMNP, parent, [](IT mnp, IT p){ return p; }, [](IT mnp, IT p){ return true; }, false, static_cast<IT> (0));
        finalhooks = Assign(hooksP, hooksMNP);
    }
    parent.Set(finalhooks);
#ifdef CC_TIMING
    double tall = MPI_Wtime() - t1;
    std::ostringstream outs;
    outs.str("");
    outs.clear();
    outs << " Conditional Hooking Time: SpMV: " << tspmv << " Other: "<< tall-tspmv;
    outs<< endl;
    SpParHelper::Print(outs.str());
#endif
    return finalhooks;
}

// Unconditional hooking: remaining star roots hook onto any neighboring
// nonstar parent. Chooses between SpMSpV (few nonstars) and dense SpMV.
// Returns the hooked vertices; updates 'parents' in place.
template <typename IT, typename NT, typename DER>
FullyDistSpVec<IT, IT> UnconditionalHook2(const SpParMat<IT, NT, DER>& A, FullyDistVec<IT, IT>& parents, FullyDistVec<IT, short> stars)
{
#ifdef CC_TIMING
    double ts = MPI_Wtime();
    double t1, tspmv;
#endif
    string spmv = "dense";
    IT nNonStars = stars.Reduce(std::plus<IT>(), static_cast<IT>(0), [](short isStar){ return static_cast<IT>(isStar == NONSTAR); });
    IT nv = A.getnrow();
    FullyDistSpVec<IT, IT> hooks(A.getcommgrid(), nv);
    // heuristic threshold: sparse path pays off when nonstars are < 2% of vertices
    if(nNonStars * 50 < nv) // use SpMSpV
    {
        spmv = "sparse";
        FullyDistSpVec<IT,IT> nonStars(stars, [](short isStar){ return isStar == NONSTAR; });
        FullyDistSpVec<IT, IT> pOfNonStars = EWiseApply<IT>(nonStars, parents, [](short isStar, IT p){return p;}, [](short isStar, IT p){return true;}, false, static_cast<IT>(0));
#ifdef CC_TIMING
        t1 = MPI_Wtime();
#endif
        SpMV<Select2ndMinSR<NT, IT>>(A, pOfNonStars, hooks, false);
#ifdef CC_TIMING
        tspmv = MPI_Wtime() - t1;
#endif
        // only current stars are eligible to hook
        hooks = EWiseApply<IT>(hooks, stars, [](IT mnp, short isStar){ return mnp; }, [](IT mnp, short isStar){ return isStar == STAR; }, false, static_cast<IT> (0));
    }
    else // use SpMV
    {
        // mask star vertices with the out-of-range sentinel nv so the dense
        // SpMV only propagates parents of nonstars
        FullyDistVec<IT, IT> parents1 = parents;
        parents1.EWiseApply(stars, [nv](IT p, short isStar){ return isStar == STAR? nv: p; });
        FullyDistVec<IT, IT> minNeighborParent ( A.getcommgrid());
#ifdef CC_TIMING
        t1 = MPI_Wtime();
#endif
        minNeighborParent = SpMV<Select2ndMinSR<NT, IT>>(A, parents1); // value is the minimum of all neighbors' parents
#ifdef CC_TIMING
        tspmv = MPI_Wtime() - t1;
#endif
        hooks = minNeighborParent.Find([nv](IT mnf){ return mnf != nv; });
        hooks = EWiseApply<IT>(hooks, stars, [](IT mnp, short isStar){ return mnp; }, [](IT mnp, short isStar){ return isStar==STAR; }, false, static_cast<IT> (0));
    }
    // apply each hook at the root of the hooking star
    FullyDistSpVec<IT,IT> hooksP = EWiseApply<IT>(hooks, parents, [](IT mnp, IT p){return p;}, [](IT mnp, IT p){return true;}, false, static_cast<IT> (0));
    FullyDistSpVec<IT, IT> finalHooks = Assign(hooksP, hooks);
    parents.Set(finalHooks);
#ifdef CC_TIMING
    double tall = MPI_Wtime() - ts;
    std::ostringstream outs;
    outs.str("");
    outs.clear();
    outs << " Unconditional Hooking Time " << spmv << " : " << tspmv << " Other: "<< tall-tspmv;
    outs<< endl;
    SpParHelper::Print(outs.str());
#endif
    return finalHooks;
}

// Pointer jumping on every vertex: parent <- parent(parent).
template <typename IT>
void Shortcut(FullyDistVec<IT, IT>& parent)
{
    FullyDistVec<IT, IT> grandparent = parent(parent);
    parent = grandparent; // we can do it unconditionally because it is trivially true for stars
}

// before shortcut, we will make all remaining stars inactive
// shortcut only on nonstar vertices
// then find stars on nonstar vertices
template <typename IT>
void Shortcut(FullyDistVec<IT, IT>& parents, FullyDistVec<IT,short> stars)
{
    FullyDistSpVec<IT,short> spNonStars(stars, [](short isStar){return isStar==NONSTAR;});
    FullyDistSpVec<IT, IT> parentsOfNonStars = EWiseApply<IT>(spNonStars, parents, [](short isStar, IT p){return p;}, [](short isStar, IT p){return true;}, false, static_cast<short>(0));
    FullyDistSpVec<IT,IT> grandParentsOfNonStars = Extract(parents, parentsOfNonStars);
    parents.Set(grandParentsOfNonStars);
}

// Sanity check: true iff every vertex shares its component label with all of
// its neighbors (i.e., the labeling is edge-consistent).
template <typename IT, typename NT, typename DER>
bool neigborsInSameCC(const SpParMat<IT,NT,DER>& A, FullyDistVec<IT, IT>& cclabel)
{
    FullyDistVec<IT, IT> minNeighborCCLabel ( A.getcommgrid());
    minNeighborCCLabel = SpMV<Select2ndMinSR<NT, IT>>(A, cclabel);
    return minNeighborCCLabel==cclabel;
}

// works only on P=1
// Debug helper: prints every edge whose endpoints carry different labels.
template <typename IT, typename NT, typename DER>
void Correctness(const SpParMat<IT,NT,DER>& A, FullyDistVec<IT, IT>& cclabel, IT nCC, FullyDistVec<IT,IT> parent)
{
    DER* spSeq = A.seqptr(); // local submatrix
    for(auto colit = spSeq->begcol(); colit != spSeq->endcol(); ++colit) // iterate over columns
    {
        IT j = colit.colid(); // local numbering
        for(auto nzit = spSeq->begnz(colit); nzit < spSeq->endnz(colit); ++nzit)
        {
            IT i = nzit.rowid();
            if( cclabel[i] != cclabel[j])
            {
                std::cout << i << " (" << parent[i] << ", "<< cclabel[i] << ")& "<< j << "("<< parent[j] << ", " << cclabel[j] << ")\n";
            }
        }
    }
}

// Input:
// parent: parent of each vertex. parent is essentially the root of the star which a vertex belongs to.
// parent of the root is itself
// Output:
// cclabel: connected components are incrementally labeled
// returns the number of connected components
// Example: input = [0, 0, 2, 3, 0, 2], output = (0, 0, 1, 2, 0, 1), return 3
template <typename IT>
IT LabelCC(FullyDistVec<IT, IT>& parent, FullyDistVec<IT, IT>& cclabel)
{
    cclabel = parent;
    // mark roots (parent == own index) with the sentinel -1
    cclabel.ApplyInd([](IT val, IT ind){return val==ind ? -1 : val;});
    FullyDistSpVec<IT, IT> roots (cclabel, bind2nd(std::equal_to<IT>(), -1));
    // parents of leaves are still correct
    FullyDistSpVec<IT, IT> pOfLeaves (cclabel, bind2nd(std::not_equal_to<IT>(), -1));
    // number the roots 0,1,2,... and propagate each root's number to its leaves
    roots.nziota(0);
    cclabel.Set(roots);
    FullyDistSpVec<IT,IT> labelOfParents = Extract(cclabel, pOfLeaves);
    cclabel.Set(labelOfParents);
    //cclabel = cclabel(parent);
    return roots.getnnz();
}

// Driver: LACC (linear-algebraic connected components) main loop.
// On return, each vertex's entry in the result holds its component label and
// nCC holds the number of components.
template <typename IT, typename NT, typename DER>
FullyDistVec<IT, IT> CC(SpParMat<IT,NT,DER>& A, IT& nCC)
{
    IT nrows = A.getnrow();
    FullyDistVec<IT,IT> parent(A.getcommgrid());
    parent.iota(nrows, 0); // parent(i)=i initially
    FullyDistVec<IT, short> stars(A.getcommgrid(), nrows, STAR); // initially every vertex belongs to a star
    int iteration = 1;
    std::ostringstream outs;
    // isolated vertices are marked as converged
    NT NullBValue;
    FullyDistVec<int64_t, NT> degree = A.Reduce(Column, plus<NT>(), NullBValue, [](NT val) { return val; });
    // printf("%d\n", val.overhang);
    // NOTE(review): NT here appears to be a struct with an 'overhang' field
    // (degree.overhang == 0 <=> isolated vertex) — confirm against the caller's NT.
    stars.EWiseApply(degree, [](short isStar, NT degree) { return degree.overhang == 0?
CONVERGED: isStar; });
    // printf("--%d\n", degree.overhang);
    int nthreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        nthreads = omp_get_num_threads();
    }
#endif
    // boolean copy of A: only the sparsity pattern matters for hooking
    SpParMat<IT, bool, SpDCCols <IT, bool>> Abool = A;
    Abool.ActivateThreading(nthreads*4);
    while (true)
    {
#ifdef CC_TIMING
        double t1 = MPI_Wtime();
#endif
        // GGGG: NT doesn't matter here
        FullyDistSpVec<IT, IT> condhooks = ConditionalHook(Abool, parent, stars, iteration);
#ifdef CC_TIMING
        double t_cond_hook = MPI_Wtime() - t1;
        t1 = MPI_Wtime();
#endif
        // Any iteration other than the first iteration,
        // a non-star is formed after a conditional hooking
        // In the first iteration, we can hook two vertices to create a star
        // After the first iteration, only singleton CCs remain isolated
        // Here, we are ignoring the first iteration (still correct, but may ignore few possible
        // unconditional hooking in the first iteration)
        // remove cond hooks from stars
        if(iteration > 1)
        {
            // GGGG: NT doesn't matter here
            StarCheckAfterHooking(Abool, parent, stars, condhooks, true);
        }
        else
        {
            // iteration 1: demote hooks and their parents directly
            stars.EWiseApply(condhooks, [](short isStar, IT x){ return static_cast<short>(NONSTAR); }, false, static_cast<IT>(NONSTAR));
            FullyDistSpVec<IT, short> pNonStar= Assign(condhooks, NONSTAR);
            stars.Set(pNonStar); // it does not create any cycle in the unconditional hooking, see the proof in the paper
        }
#ifdef CC_TIMING
        double t_starcheck1 = MPI_Wtime() - t1;
        t1 = MPI_Wtime();
#endif
        FullyDistSpVec<IT, IT> uncondHooks = UnconditionalHook2(Abool, parent, stars);
#ifdef CC_TIMING
        double t_uncond_hook = MPI_Wtime() - t1;
        t1 = MPI_Wtime();
#endif
        if(iteration > 1)
        {
            StarCheckAfterHooking(Abool, parent, stars, uncondHooks, false);
            // any star surviving both hooking phases can no longer change: converged
            stars.Apply([](short isStar){return isStar==STAR? CONVERGED: isStar;});
        }
        else
        {
            // iteration 1: only demote the hooked vertices
            stars.EWiseApply(uncondHooks, [](short isStar, IT x){return static_cast<short>(NONSTAR);}, false, static_cast<IT>(NONSTAR));
        }
        IT nconverged = stars.Reduce(std::plus<IT>(), static_cast<IT>(0), [](short isStar){return static_cast<IT>(isStar==CONVERGED);});
        if(nconverged==nrows)
        {
            outs.clear();
            outs << "Iteration: " << iteration << " converged: " << nrows << " stars: 0" << " nonstars: 0" ;
            outs<< endl;
            SpParHelper::Print(outs.str());
            break;
        }
#ifdef CC_TIMING
        double t_starcheck2 = MPI_Wtime() - t1;
        t1 = MPI_Wtime();
#endif
        Shortcut(parent, stars);
#ifdef CC_TIMING
        double t_shortcut = MPI_Wtime() - t1;
        t1 = MPI_Wtime();
#endif
        StarCheck(parent, stars);
#ifdef CC_TIMING
        double t_starcheck = MPI_Wtime() - t1;
        t1 = MPI_Wtime();
#endif
        IT nonstars = stars.Reduce(std::plus<IT>(), static_cast<IT>(0), [](short isStar){return static_cast<IT>(isStar==NONSTAR);});
        IT nstars = nrows - (nonstars + nconverged);
        double t2 = MPI_Wtime();
        outs.str("");
        outs.clear();
        outs << "Iteration: " << iteration << " converged: " << nconverged << " stars: " << nstars << " nonstars: " << nonstars;
#ifdef CC_TIMING
        //outs << " Time: t_cond_hook: " << t_cond_hook << " t_starcheck1: " << t_starcheck1 << " t_uncond_hook: " << t_uncond_hook << " t_starcheck2: " << t_starcheck2 << " t_shortcut: " << t_shortcut << " t_starcheck: " << t_starcheck;
#endif
        outs << endl;
        SpParHelper::Print(outs.str());
        iteration++;
    }
    FullyDistVec<IT, IT> cc(parent.getcommgrid());
    nCC = LabelCC(parent, cc);
    //Correctness(A, cc, nCC, parent);
    return cc;
}

// Debug helper: prints the member vertices of every component.
template <typename IT>
void PrintCC(FullyDistVec<IT, IT> CC, IT nCC)
{
    for(IT i=0; i< nCC; i++)
    {
        FullyDistVec<IT, IT> ith = CC.FindInds(bind2nd(std::equal_to<IT>(), i));
        ith.DebugPrint();
    }
}

// Print the size of the first 4 clusters
template <typename IT>
void First4Clust(FullyDistVec<IT, IT>& cc)
{
    FullyDistSpVec<IT, IT> cc1 = cc.Find([](IT label){return label==0;});
    FullyDistSpVec<IT, IT> cc2 = cc.Find([](IT label){return label==1;});
    FullyDistSpVec<IT, IT> cc3 = cc.Find([](IT label){return label==2;});
    FullyDistSpVec<IT, IT> cc4 = cc.Find([](IT label){return label==3;});
    std::ostringstream outs;
    outs.str("");
    outs.clear();
    outs << "Size of the first component: " << cc1.getnnz() << std::endl;
    outs << "Size of the second component: " << cc2.getnnz() << std::endl;
    outs << "Size of the third component: " << cc3.getnnz() << std::endl;
    outs << "Size of the fourth component: " << cc4.getnnz() << std::endl;
    SpParHelper::Print(outs.str());
}

// Builds a 200-bin histogram of component sizes (normalized by the largest
// component) and appends it to "hist.txt" on rank 0.
template <typename IT>
void HistCC(FullyDistVec<IT, IT> CC, IT nCC)
{
    FullyDistVec<IT, IT> ccSizes(CC.getcommgrid(), nCC, 0);
    for(IT i = 0; i < nCC; i++)
    {
        FullyDistSpVec<IT, IT> ith = CC.Find(bind2nd(std::equal_to<IT>(), i));
        ccSizes.SetElement(i, ith.getnnz());
    }
    IT largestCCSise = ccSizes.Reduce(maximum<IT>(), static_cast<IT>(0));
    const IT * locCCSizes = ccSizes.GetLocArr();
    int numBins = 200;
    std::vector<IT> localHist(numBins,0);
    for(IT i=0; i< ccSizes.LocArrSize(); i++)
    {
        IT bin = (locCCSizes[i]*(numBins-1))/largestCCSise;
        localHist[bin]++;
    }
    std::vector<IT> globalHist(numBins,0);
    MPI_Comm world = CC.getcommgrid()->GetWorld();
    MPI_Reduce(localHist.data(), globalHist.data(), numBins, MPIType<IT>(), MPI_SUM, 0, world);
    int myrank;
    MPI_Comm_rank(world,&myrank);
    if(myrank==0)
    {
        std::cout << "The largest component size: " << largestCCSise << std::endl;
        std::ofstream output;
        output.open("hist.txt", std::ios_base::app );
        std::copy(globalHist.begin(), globalHist.end(), std::ostream_iterator<IT> (output, " "));
        output << std::endl;
        output.close();
    }
}

}
matrix.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M AAA TTTTT RRRR IIIII X X % % MM MM A A T R R I X X % % M M M AAAAA T RRRR I X % % M M A A T R R I X X % % M M A A T R R IIIII X X % % % % % % MagickCore Matrix Methods % % % % Software Design % % Cristy % % August 2007 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image-private.h" #include "MagickCore/matrix.h" #include "MagickCore/matrix-private.h" #include "MagickCore/memory_.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" /* Typedef declaration. 
*/
/*
  A disk-backed (or memory/map-backed) rectangular cache of fixed-size
  elements: columns x rows cells of 'stride' bytes each.
*/
struct _MatrixInfo
{
  CacheType
    type;              /* MemoryCache, MapCache, or DiskCache */

  size_t
    columns,
    rows,
    stride;            /* bytes per element */

  MagickSizeType
    length;            /* total bytes: columns*rows*stride */

  MagickBooleanType
    mapped,            /* true when 'elements' came from MapBlob() */
    synchronize;

  char
    path[MagickPathExtent];   /* scratch file path for the disk cache */

  int
    file;              /* scratch file descriptor, or -1 */

  void
    *elements;         /* in-core storage; NULL when purely disk-backed */

  SemaphoreInfo
    *semaphore;

  size_t
    signature;
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e M a t r i x I n f o                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireMatrixInfo() allocates the ImageInfo structure.
%
%  The format of the AcquireMatrixInfo method is:
%
%      MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows,
%        const size_t stride,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o columns: the matrix columns.
%
%    o rows: the matrix rows.
%
%    o stride: the matrix stride.
%
%    o exception: return any errors or warnings in this structure.
%
*/
#if defined(SIGBUS)
/* SIGBUS arrives when a mapped page cannot be materialized (e.g. disk full);
   abort with a cache error rather than crashing silently. */
static void MatrixSignalHandler(int status)
{
  ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache");
}
#endif

/*
  Write 'length' bytes from 'buffer' to the matrix scratch file at 'offset'.
  Retries on EINTR and on short writes; returns the number of bytes written
  (possibly fewer than 'length' on error) or -1 if the initial seek fails.
  Without pwrite(), the seek+write pair is serialized under the semaphore.
*/
static inline MagickOffsetType WriteMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)  /* EINTR: retry; anything else: give up */
          break;
      }
  }
#if !defined(MAGICKCORE_HAVE_PWRITE)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}
static MagickBooleanType SetMatrixExtent( MatrixInfo *magick_restrict matrix_info, MagickSizeType length) { MagickOffsetType count, extent, offset; if (length != (MagickSizeType) ((MagickOffsetType) length)) return(MagickFalse); offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END); if (offset < 0) return(MagickFalse); if ((MagickSizeType) offset >= length) return(MagickTrue); extent=(MagickOffsetType) length-1; count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) ""); #if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE) if (matrix_info->synchronize != MagickFalse) (void) posix_fallocate(matrix_info->file,offset+1,extent-offset); #endif #if defined(SIGBUS) (void) signal(SIGBUS,MatrixSignalHandler); #endif return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue); } MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns, const size_t rows,const size_t stride,ExceptionInfo *exception) { char *synchronize; MagickBooleanType status; MatrixInfo *matrix_info; matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info)); if (matrix_info == (MatrixInfo *) NULL) return((MatrixInfo *) NULL); (void) ResetMagickMemory(matrix_info,0,sizeof(*matrix_info)); matrix_info->signature=MagickCoreSignature; matrix_info->columns=columns; matrix_info->rows=rows; matrix_info->stride=stride; matrix_info->semaphore=AcquireSemaphoreInfo(); synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (synchronize != (const char *) NULL) { matrix_info->synchronize=IsStringTrue(synchronize); synchronize=DestroyString(synchronize); } matrix_info->length=(MagickSizeType) columns*rows*stride; if (matrix_info->columns != (size_t) (matrix_info->length/rows/stride)) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'","matrix cache"); return(DestroyMatrixInfo(matrix_info)); } matrix_info->type=MemoryCache; status=AcquireMagickResource(AreaResource,matrix_info->length); if ((status != MagickFalse) && 
(matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length))) { status=AcquireMagickResource(MemoryResource,matrix_info->length); if (status != MagickFalse) { matrix_info->mapped=MagickFalse; matrix_info->elements=AcquireMagickMemory((size_t) matrix_info->length); if (matrix_info->elements == NULL) { matrix_info->mapped=MagickTrue; matrix_info->elements=MapBlob(-1,IOMode,0,(size_t) matrix_info->length); } if (matrix_info->elements == (unsigned short *) NULL) RelinquishMagickResource(MemoryResource,matrix_info->length); } } matrix_info->file=(-1); if (matrix_info->elements == (unsigned short *) NULL) { status=AcquireMagickResource(DiskResource,matrix_info->length); if (status == MagickFalse) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'","matrix cache"); return(DestroyMatrixInfo(matrix_info)); } matrix_info->type=DiskCache; (void) AcquireMagickResource(MemoryResource,matrix_info->length); matrix_info->file=AcquireUniqueFileResource(matrix_info->path); if (matrix_info->file == -1) return(DestroyMatrixInfo(matrix_info)); status=AcquireMagickResource(MapResource,matrix_info->length); if (status != MagickFalse) { status=SetMatrixExtent(matrix_info,matrix_info->length); if (status != MagickFalse) { matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0, (size_t) matrix_info->length); if (matrix_info->elements != NULL) matrix_info->type=MapCache; else RelinquishMagickResource(MapResource,matrix_info->length); } } } return(matrix_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e M a g i c k M a t r i x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireMagickMatrix() allocates and returns a matrix in the form of an % array of pointers to an array of doubles, with all values pre-set to zero. 
%
%  This used to generate the two dimensional matrix, and vectors required
%  for the GaussJordanElimination() method below, solving some system of
%  simultaneous equations.
%
%  The format of the AcquireMagickMatrix method is:
%
%      double **AcquireMagickMatrix(const size_t number_rows,
%        const size_t size)
%
%  A description of each parameter follows:
%
%    o number_rows: the number pointers for the array of pointers
%      (first dimension).
%
%    o size: the size of the array of doubles each pointer points to
%      (second dimension).
%
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
  const size_t size)
{
  double
    **matrix;

  register ssize_t
    i,
    j;

  /*
    Allocate the spine, then each zero-filled row; on a row-allocation
    failure release everything acquired so far and return NULL.
  */
  matrix=(double **) AcquireQuantumMemory(number_rows,sizeof(*matrix));
  if (matrix == (double **) NULL)
    return((double **) NULL);
  for (i=0; i < (ssize_t) number_rows; i++)
  {
    matrix[i]=(double *) AcquireQuantumMemory(size,sizeof(*matrix[i]));
    if (matrix[i] == (double *) NULL)
      {
        while (--i >= 0)
          matrix[i]=(double *) RelinquishMagickMemory(matrix[i]);
        return((double **) RelinquishMagickMemory(matrix));
      }
    for (j=0; j < (ssize_t) size; j++)
      matrix[i][j]=0.0;
  }
  return(matrix);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y M a t r i x I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
%  with the matrix.
%
%  The format of the DestroyMatrixInfo method is:
%
%      MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix.
%
*/
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(matrix_info->semaphore);
  switch (matrix_info->type)
  {
    case MemoryCache:
    {
      if (matrix_info->mapped == MagickFalse)
        matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
      else
        {
          /* anonymous map: release with UnmapBlob(), not free() */
          (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
          matrix_info->elements=(unsigned short *) NULL;
        }
      RelinquishMagickResource(MemoryResource,matrix_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
      matrix_info->elements=NULL;
      RelinquishMagickResource(MapResource,matrix_info->length);
      /* FALLTHROUGH: a map cache also owns the scratch file and the disk
         resource that the DiskCache case below releases. */
    }
    case DiskCache:
    {
      if (matrix_info->file != -1)
        (void) close(matrix_info->file);
      (void) RelinquishUniqueFileResource(matrix_info->path);
      RelinquishMagickResource(DiskResource,matrix_info->length);
      /* NOTE(review): the MemoryResource acquired in AcquireMatrixInfo's
         disk path does not appear to be relinquished here -- confirm. */
      break;
    }
    default:
      break;
  }
  UnlockSemaphoreInfo(matrix_info->semaphore);
  RelinquishSemaphoreInfo(&matrix_info->semaphore);
  return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G a u s s J o r d a n E l i m i n a t i o n                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GaussJordanElimination() returns a matrix in reduced row echelon form,
%  while simultaneously reducing and thus solving the augmented results
%  matrix.
%
%  See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
%  The format of the GaussJordanElimination method is:
%
%      MagickBooleanType GaussJordanElimination(double **matrix,
%        double **vectors,const size_t rank,const size_t number_vectors)
%
%  A description of each parameter follows:
%
%    o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
%    o vectors: the additional matrix augmenting the matrix for row reduction.
%      Producing an 'array of column vectors'.
%
%    o rank: The size of the matrix (both rows and columns).
%      Also represents the number of terms that need to be solved.
%
%    o number_vectors: Number of vector columns, augmenting the above matrix.
%      Usually 1, but can be more for more complex equation solving.
%
%  Note that the 'matrix' is given as an 'array of row pointers' of rank size.
%  That is, values can be assigned as matrix[row][column] where 'row' is
%  typically the equation, and 'column' is the term of the equation.
%  That is, the matrix is in the form of a 'row first array'.
%
%  However 'vectors' is an 'array of column pointers' which can have any number
%  of columns, with each column array the same 'rank' size as 'matrix'.
%
%  This allows for simpler handling of the results, especially if only one
%  column 'vector' is all that is required to produce the desired solution.
%
%  For example, the 'vectors' can consist of a pointer to a simple array of
%  doubles, when only one set of simultaneous equations is to be solved from
%  the given set of coefficient-weighted terms.
%
%       double **matrix = AcquireMagickMatrix(8UL,8UL);
%       double coefficients[8];
%       ...
%       GaussJordanElimination(matrix, &coefficients, 8UL, 1UL);
%
%  However by specifying more 'columns' (as an 'array of vector columns'),
%  you can use this function to solve a set of 'separable' equations.
%
%  For example a distortion function where u = U(x,y) and v = V(x,y),
%  and the functions U() and V() have separate coefficients, but are being
%  generated from a common x,y->u,v data set.
%
%  Another example is generation of a color gradient from a set of colors at
%  specific coordinates, such as a list x,y -> r,g,b,a.
%
%  You can also use the 'vectors' to generate an inverse of the given 'matrix',
%  though as a 'column first array' rather than a 'row first array'.
For % details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination % */ MagickPrivate MagickBooleanType GaussJordanElimination(double **matrix, double **vectors,const size_t rank,const size_t number_vectors) { #define GaussJordanSwap(x,y) \ { \ if ((x) != (y)) \ { \ (x)+=(y); \ (y)=(x)-(y); \ (x)=(x)-(y); \ } \ } double max, scale; register ssize_t i, j, k; ssize_t column, *columns, *pivots, row, *rows; columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns)); rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows)); pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots)); if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) || (pivots == (ssize_t *) NULL)) { if (pivots != (ssize_t *) NULL) pivots=(ssize_t *) RelinquishMagickMemory(pivots); if (columns != (ssize_t *) NULL) columns=(ssize_t *) RelinquishMagickMemory(columns); if (rows != (ssize_t *) NULL) rows=(ssize_t *) RelinquishMagickMemory(rows); return(MagickFalse); } (void) ResetMagickMemory(columns,0,rank*sizeof(*columns)); (void) ResetMagickMemory(rows,0,rank*sizeof(*rows)); (void) ResetMagickMemory(pivots,0,rank*sizeof(*pivots)); column=0; row=0; for (i=0; i < (ssize_t) rank; i++) { max=0.0; for (j=0; j < (ssize_t) rank; j++) if (pivots[j] != 1) { for (k=0; k < (ssize_t) rank; k++) if (pivots[k] != 0) { if (pivots[k] > 1) return(MagickFalse); } else if (fabs(matrix[j][k]) >= max) { max=fabs(matrix[j][k]); row=j; column=k; } } pivots[column]++; if (row != column) { for (k=0; k < (ssize_t) rank; k++) GaussJordanSwap(matrix[row][k],matrix[column][k]); for (k=0; k < (ssize_t) number_vectors; k++) GaussJordanSwap(vectors[k][row],vectors[k][column]); } rows[i]=row; columns[i]=column; if (matrix[column][column] == 0.0) return(MagickFalse); /* sigularity */ scale=PerceptibleReciprocal(matrix[column][column]); matrix[column][column]=1.0; for (j=0; j < (ssize_t) rank; j++) matrix[column][j]*=scale; for (j=0; j < (ssize_t) number_vectors; j++) vectors[j][column]*=scale; for (j=0; j < 
(ssize_t) rank; j++) if (j != column) { scale=matrix[j][column]; matrix[j][column]=0.0; for (k=0; k < (ssize_t) rank; k++) matrix[j][k]-=scale*matrix[column][k]; for (k=0; k < (ssize_t) number_vectors; k++) vectors[k][j]-=scale*vectors[k][column]; } } for (j=(ssize_t) rank-1; j >= 0; j--) if (columns[j] != rows[j]) for (i=0; i < (ssize_t) rank; i++) GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]); pivots=(ssize_t *) RelinquishMagickMemory(pivots); rows=(ssize_t *) RelinquishMagickMemory(rows); columns=(ssize_t *) RelinquishMagickMemory(columns); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a t r i x C o l u m n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMatrixColumns() returns the number of columns in the matrix. % % The format of the GetMatrixColumns method is: % % size_t GetMatrixColumns(const MatrixInfo *matrix_info) % % A description of each parameter follows: % % o matrix_info: the matrix. % */ MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info) { assert(matrix_info != (MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); return(matrix_info->columns); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a t r i x E l e m e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMatrixElement() returns the specifed element in the matrix. % % The format of the GetMatrixElement method is: % % MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info, % const ssize_t x,const ssize_t y,void *value) % % A description of each parameter follows: % % o matrix_info: the matrix columns. % % o x: the matrix x-offset. % % o y: the matrix y-offset. % % o value: return the matrix element in this buffer. 
%
*/

/* Clamp a column index into [0, columns-1] (replicate-edge addressing). */
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x < 0L)
    return(0L);
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x);
}

/* Clamp a row index into [0, rows-1] (replicate-edge addressing). */
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y < 0L)
    return(0L);
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y);
}

/*
  ReadMatrixElements() reads `length' bytes at `offset' from the matrix
  scratch file into `buffer', retrying on EINTR and on partial reads.
  Returns the number of bytes read, or -1 on a seek failure.  Without
  pread() the seek+read pair is serialized with the matrix semaphore.
*/
static inline MagickOffsetType ReadMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* cap each request at SSIZE_MAX so the cast to size_t is safe */
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* real error or EOF: return the bytes read so far */
      }
  }
#if !defined(MAGICKCORE_HAVE_PREAD)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}

MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,void *value)
{
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  /* out-of-range coordinates are clamped to the nearest edge element,
     unlike SetMatrixElement() which rejects them */
  i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
    EdgeX(x,matrix_info->columns);
  if (matrix_info->type != DiskCache)
    {
      (void) memcpy(value,(unsigned char *) matrix_info->elements+i*
        matrix_info->stride,matrix_info->stride);
      return(MagickTrue);
    }
  count=ReadMatrixElements(matrix_info,i*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  if (count != (MagickOffsetType) matrix_info->stride)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t M a t r i x R o w s                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetMatrixRows() returns the number of rows in the matrix.
%
%  The format of the GetMatrixRows method is:
%
%      size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  return(matrix_info->rows);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   L e a s t S q u a r e s A d d T e r m s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LeastSquaresAddTerms() adds one set of terms and associate results to the
%  given matrix and vectors for solving using least-squares function fitting.
%
%  The format of the AcquireMagickMatrix method is:
%
%      void LeastSquaresAddTerms(double **matrix,double **vectors,
%        const double *terms,const double *results,const size_t rank,
%        const size_t number_vectors);
%
%  A description of each parameter follows:
%
%    o matrix: the square matrix to add given terms/results to.
%
%    o vectors: the result vectors to add terms/results to.
%
%    o terms: the pre-calculated terms (without the unknown coefficient
%      weights) that forms the equation being added.
%
%    o results: the result(s) that should be generated from the given terms
%      weighted by the yet-to-be-solved coefficients.
%
%    o rank: the rank or size of the dimensions of the square matrix.
%      Also the length of vectors, and number of terms being added.
%
%    o number_vectors: Number of result vectors, and number or results being
%      added.  Also represents the number of separable systems of equations
%      that is being solved.
%
%  Example of use...
% % 2 dimensional Affine Equations (which are separable) % c0*x + c2*y + c4*1 => u % c1*x + c3*y + c5*1 => v % % double **matrix = AcquireMagickMatrix(3UL,3UL); % double **vectors = AcquireMagickMatrix(2UL,3UL); % double terms[3], results[2]; % ... % for each given x,y -> u,v % terms[0] = x; % terms[1] = y; % terms[2] = 1; % results[0] = u; % results[1] = v; % LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL); % ... % if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) { % c0 = vectors[0][0]; % c2 = vectors[0][1]; % c4 = vectors[0][2]; % c1 = vectors[1][0]; % c3 = vectors[1][1]; % c5 = vectors[1][2]; % } % else % printf("Matrix unsolvable\n); % RelinquishMagickMatrix(matrix,3UL); % RelinquishMagickMatrix(vectors,2UL); % */ MagickPrivate void LeastSquaresAddTerms(double **matrix,double **vectors, const double *terms,const double *results,const size_t rank, const size_t number_vectors) { register ssize_t i, j; for (j=0; j < (ssize_t) rank; j++) { for (i=0; i < (ssize_t) rank; i++) matrix[i][j]+=terms[i]*terms[j]; for (i=0; i < (ssize_t) number_vectors; i++) vectors[i][j]+=results[i]*terms[j]; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a t r i x T o I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MatrixToImage() returns a matrix as an image. The matrix elements must be % of type double otherwise nonsense is returned. % % The format of the MatrixToImage method is: % % Image *MatrixToImage(const MatrixInfo *matrix_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o matrix_info: the matrix. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    max_value,
    min_value,
    scale_factor,
    value;

  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* elements must be at least double-sized for the scans below */
  if (matrix_info->stride < sizeof(double))
    return((Image *) NULL);
  /*
    Determine range of matrix.
  */
  (void) GetMatrixElement(matrix_info,0,0,&value);
  min_value=value;
  max_value=value;
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) matrix_info->columns; x++)
    {
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      if (value < min_value)
        min_value=value;
      else
        if (value > max_value)
          max_value=value;
    }
  }
  /* map [min,max] onto [0,QuantumRange]; degenerate ranges scale to a
     constant (all-zero matrix maps to black) */
  if ((min_value == 0.0) && (max_value == 0.0))
    scale_factor=0;
  else
    if (min_value == max_value)
      {
        scale_factor=(double) QuantumRange/min_value;
        min_value=0;
      }
    else
      scale_factor=(double) QuantumRange/(max_value-min_value);
  /*
    Convert matrix to image.
  */
  image=AcquireImage((ImageInfo *) NULL,exception);
  /* NOTE(review): assumes AcquireImage() cannot return NULL here --
     confirm before relying on it */
  image->columns=matrix_info->columns;
  image->rows=matrix_info->rows;
  image->colorspace=GRAYColorspace;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      value;

    register Quantum
      *q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* unreadable elements leave the queued pixel value untouched */
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      value=scale_factor*(value-min_value);
      *q=ClampToQuantum(value);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N u l l M a t r i x                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NullMatrix() sets all elements of the matrix to zero.
%
%  The format of the NullMatrix method is:
%
%      MagickBooleanType *NullMatrix(MatrixInfo *matrix_info)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix.
%
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
  register ssize_t
    x;

  ssize_t
    count,
    y;

  unsigned char
    value;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  if (matrix_info->type != DiskCache)
    {
      (void) ResetMagickMemory(matrix_info->elements,0,(size_t)
        matrix_info->length);
      return(MagickTrue);
    }
  /*
    Disk cache: write zero bytes over the whole scratch file.  FIX: the
    inner loop previously ran to matrix_info->length (the TOTAL byte count)
    for every row, writing rows*length bytes; each row is columns*stride
    bytes, so bound the inner loop accordingly.
  */
  value=0;
  (void) lseek(matrix_info->file,0,SEEK_SET);
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    for (x=0; x < (ssize_t) (matrix_info->columns*matrix_info->stride); x++)
    {
      count=write(matrix_info->file,&value,sizeof(value));
      if (count != (ssize_t) sizeof(value))
        break;
    }
    if (x < (ssize_t) (matrix_info->columns*matrix_info->stride))
      break;  /* short write: report failure below */
  }
  return(y < (ssize_t) matrix_info->rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e l i n q u i s h M a g i c k M a t r i x                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RelinquishMagickMatrix() frees the previously acquired matrix (array of
%  pointers to arrays of doubles).
%
%  The format of the RelinquishMagickMatrix method is:
%
%      double **RelinquishMagickMatrix(double **matrix,
%        const size_t number_rows)
%
%  A description of each parameter follows:
%
%    o matrix: the matrix to relinquish
%
%    o number_rows: the first dimension of the acquired matrix (number of
%      pointers)
%
*/
/*
  Free a matrix created by AcquireMagickMatrix(): each row array first,
  then the array of row pointers.  A NULL matrix is tolerated.  Returns
  NULL so callers can reset their pointer in one statement.
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
  const size_t number_rows)
{
  register ssize_t
    r;

  if (matrix == (double **) NULL)
    return(matrix);
  for (r=(ssize_t) number_rows-1; r >= 0; r--)
    matrix[r]=(double *) RelinquishMagickMemory(matrix[r]);
  return((double **) RelinquishMagickMemory(matrix));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t M a t r i x E l e m e n t                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetMatrixElement() sets the specifed element in the matrix.
%
%  The format of the SetMatrixElement method is:
%
%      MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
%        const ssize_t x,const ssize_t y,void *value)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix columns.
%
%    o x: the matrix x-offset.
%
%    o y: the matrix y-offset.
%
%    o value: set the matrix element to this value.
%
*/
/*
  Store one element at (x,y).  Unlike GetMatrixElement(), out-of-range
  coordinates are rejected (MagickFalse) rather than clamped to the edge.
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,const void *value)
{
  MagickOffsetType
    count,
    index;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  index=(MagickOffsetType) y*matrix_info->columns+x;
  if ((index < 0) ||
      ((MagickSizeType) (index*matrix_info->stride) >= matrix_info->length))
    return(MagickFalse);
  if (matrix_info->type == DiskCache)
    {
      count=WriteMatrixElements(matrix_info,index*matrix_info->stride,
        matrix_info->stride,(unsigned char *) value);
      return(count == (MagickOffsetType) matrix_info->stride ? MagickTrue :
        MagickFalse);
    }
  (void) memcpy((unsigned char *) matrix_info->elements+index*
    matrix_info->stride,value,matrix_info->stride);
  return(MagickTrue);
}
dotMultiply.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ extern "C" void FUNC(dotMultiply) (const dlong & N, const pfloat * __restrict__ w, const pfloat * __restrict__ v, pfloat * __restrict__ wv){ #ifdef __NEKRS__OMP__ #pragma omp parallel for #endif for(dlong n=0;n<N;++n){ wv [n] = w[n]*v[n]; } }
GB_unop__identity_fc64_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fc64_fc32)
// op(A') function:  GB (_unop_tran__identity_fc64_fc32)

// C type:   GxB_FC64_t
// A type:   GxB_FC32_t
// cast:     GxB_FC64_t cij = GxB_CMPLX ((double) crealf (aij), (double) cimagf (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: the cast does all the work)
#define GB_OP(z, x) \
    z = x ;

// casting: widen a single-precision complex value to double precision
#define GB_CAST(z, aij) \
    GxB_FC64_t z = GxB_CMPLX ((double) crealf (aij), (double) cimagf (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = GxB_CMPLX ((double) crealf (aij), (double) cimagf (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fc64_fc32)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries (or bitmap size)
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) crealf (aij), (double) cimagf (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip positions with no entry
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) crealf (aij), (double) cimagf (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fc64_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template picks up the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_3x3_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Transform a 3x3 kernel into the 8x8 Winograd F(6x6,3x3) domain, then
// interleave it into the pack4 layout consumed by conv3x3s1_winograd64_pack4_msa.
// NOTE(review): the interleave loops iterate in full groups of 4, which assumes
// inch and outch are multiples of 4 (pack4) — confirm with the caller.
static void conv3x3s1_winograd64_transform_kernel_pack4_msa(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // G matrix (8x3) of the F(6,3) kernel transform: U = G * g * G^T
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : tmp = G * g (applied over kernel rows)
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : U = tmp * G^T
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];
                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = pb-pa-inch/pa-64-outch/pb
    kernel_tm_pack4.create(inch / 4, 64, outch / 4, (size_t)4u * 4 * 4, 4 * 4);

    for (int q = 0; q + (4 - 1) < outch; q += 4)
    {
        Mat g0 = kernel_tm_pack4.channel(q / 4);

        for (int k = 0; k < 64; k++)
        {
            float* g00 = g0.row<float>(k);

            for (int p = 0; p + (4 - 1) < inch; p += 4)
            {
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);

                        g00[0] = (float)k00[k];

                        g00++;
                    }
                }
            }
        }
    }
}

// Winograd F(6x6,3x3) convolution, stride 1, pack4 layout:
// 1) pad the input so output is a multiple of 6 (input becomes 6n+2),
// 2) transform input into 64 frequency channels per tile,
// 3) permute tiles into 12/8/4/2/1-wide batches and run a small GEMM per
//    frequency using MSA fmadd intrinsics,
// 4) transform the output back and crop the padding.
static void conv3x3s1_winograd64_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 6;
        int h_tiles = outh / 6;
        const int tiles = w_tiles * h_tiles;

        bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        conv3x3s1_winograd64_transform_input_pack4_msa(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        const int tiles = h_tm / 8 * w_tm / 8;

        // permute
        // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        Mat bottom_blob_tm2;
        // rows hold batches of 12 tiles, with the remainder split into 8/4/2/1
        if (tiles >= 12)
            bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 64; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
            for (; i + 11 < tiles; i += 12)
            {
                float* tmpptr = tm2.row(i / 12);

                const float* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 4;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 4x12
                    v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
                    v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                    v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                    v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
                    v4f32 _r5 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);
                    v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0);
                    v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0);
                    v4f32 _r8 = (v4f32)__msa_ld_w(r0 + 4 * 8, 0);
                    v4f32 _r9 = (v4f32)__msa_ld_w(r0 + 4 * 9, 0);
                    v4f32 _ra = (v4f32)__msa_ld_w(r0 + 4 * 10, 0);
                    v4f32 _rb = (v4f32)__msa_ld_w(r0 + 4 * 11, 0);

                    // interleave pairs (right/left halves), then pairs of pairs
                    v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
                    v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
                    v4i32 _r89r = __msa_ilvr_w((v4i32)_r9, (v4i32)_r8);
                    v4i32 _r89l = __msa_ilvl_w((v4i32)_r9, (v4i32)_r8);
                    v4i32 _rabr = __msa_ilvr_w((v4i32)_rb, (v4i32)_ra);
                    v4i32 _rabl = __msa_ilvl_w((v4i32)_rb, (v4i32)_ra);
                    v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
                    v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
                    v2i64 _r89ab_0 = __msa_ilvr_d((v2i64)_rabr, (v2i64)_r89r);
                    v2i64 _r89ab_1 = __msa_ilvl_d((v2i64)_rabr, (v2i64)_r89r);
                    v2i64 _r89ab_2 = __msa_ilvr_d((v2i64)_rabl, (v2i64)_r89l);
                    v2i64 _r89ab_3 = __msa_ilvl_d((v2i64)_rabl, (v2i64)_r89l);

                    __msa_st_w((v4i32)_r0123_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
                    __msa_st_w((v4i32)_r89ab_0, tmpptr + 4 * 2, 0);
                    __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 3, 0);
                    __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 4, 0);
                    __msa_st_w((v4i32)_r89ab_1, tmpptr + 4 * 5, 0);
                    __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 6, 0);
                    __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 7, 0);
                    __msa_st_w((v4i32)_r89ab_2, tmpptr + 4 * 8, 0);
                    __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 9, 0);
                    __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 10, 0);
                    __msa_st_w((v4i32)_r89ab_3, tmpptr + 4 * 11, 0);

                    r0 += bottom_blob_tm.cstep * 4;
                    tmpptr += 48;
                }
            }
            for (; i + 7 < tiles; i += 8)
            {
                float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);

                const float* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 4;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 4x8
                    v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
                    v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                    v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                    v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
                    v4f32 _r5 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);
                    v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0);
                    v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0);

                    v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
                    v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
                    v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
                    v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);

                    __msa_st_w((v4i32)_r0123_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
                    __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 2, 0);
                    __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 3, 0);
                    __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 4, 0);
                    __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 5, 0);
                    __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 6, 0);
                    __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 7, 0);

                    r0 += bottom_blob_tm.cstep * 4;
                    tmpptr += 32;
                }
            }
            for (; i + 3 < tiles; i += 4)
            {
                float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);

                const float* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 4;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 4x4
                    v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
                    v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                    v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);

                    v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
                    v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);

                    __msa_st_w((v4i32)_r0123_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r0123_1, tmpptr + 4, 0);
                    __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 2, 0);
                    __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 3, 0);

                    r0 += bottom_blob_tm.cstep * 4;
                    tmpptr += 16;
                }
            }
            for (; i + 1 < tiles; i += 2)
            {
                float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);

                const float* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 4;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 4x2
                    v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);

                    v4i32 _r01_0 = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01_1 = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);

                    __msa_st_w((v4i32)_r01_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r01_1, tmpptr + 4, 0);

                    r0 += bottom_blob_tm.cstep * 4;
                    tmpptr += 8;
                }
            }
            for (; i < tiles; i++)
            {
                float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);

                const float* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 4;

                for (int q = 0; q < inch; q++)
                {
                    v4f32 _val = (v4f32)__msa_ld_w(r0, 0);
                    __msa_st_w((v4i32)_val, tmpptr, 0);

                    r0 += bottom_blob_tm.cstep * 4;
                    tmpptr += 4;
                }
            }
        }

        bottom_blob_tm = Mat();
        // permute end

        top_blob_tm.create(tiles, 64, outch, 4u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            float* output0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int r = 0; r < 64; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 11 < tiles; i += 12)
                {
                    const float* r0 = bb2.row(i / 12);
                    const float* k0 = kernel0_tm.row(r);

                    int nn = inch * 4; // inch always > 0

                    // 12 accumulators, one per tile column in the batch
                    v4f32 _sum0 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum1 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum2 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum3 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum4 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum5 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum6 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum7 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum8 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum9 = (v4f32)__msa_fill_w(0);
                    v4f32 _suma = (v4f32)__msa_fill_w(0);
                    v4f32 _sumb = (v4f32)__msa_fill_w(0);

                    for (int j = 0; j < nn; j++)
                    {
                        __builtin_prefetch(r0 + 48);
                        __builtin_prefetch(k0 + 16);
                        v4i32 _val0123 = __msa_ld_w(r0, 0);
                        v4i32 _val4567 = __msa_ld_w(r0 + 4, 0);
                        v4i32 _val89ab = __msa_ld_w(r0 + 8, 0);
                        v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
                        _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
                        _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
                        _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
                        _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
                        _sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0);
                        _sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0);
                        _sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0);
                        _sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0);
                        _sum8 = __msa_fmadd_w(_sum8, (v4f32)__msa_splati_w(_val89ab, 0), _w0);
                        _sum9 = __msa_fmadd_w(_sum9, (v4f32)__msa_splati_w(_val89ab, 1), _w0);
                        _suma = __msa_fmadd_w(_suma, (v4f32)__msa_splati_w(_val89ab, 2), _w0);
                        _sumb = __msa_fmadd_w(_sumb, (v4f32)__msa_splati_w(_val89ab, 3), _w0);

                        r0 += 12;
                        k0 += 4;
                    }

                    __msa_st_w((v4i32)_sum0, output0_tm, 0);
                    __msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
                    __msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0);
                    __msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0);
                    __msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0);
                    __msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0);
                    __msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0);
                    __msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0);
                    __msa_st_w((v4i32)_sum8, output0_tm + 4 * 8, 0);
                    __msa_st_w((v4i32)_sum9, output0_tm + 4 * 9, 0);
                    __msa_st_w((v4i32)_suma, output0_tm + 4 * 10, 0);
                    __msa_st_w((v4i32)_sumb, output0_tm + 4 * 11, 0);

                    output0_tm += 4 * 12;
                }
                for (; i + 7 < tiles; i += 8)
                {
                    const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
                    const float* k0 = kernel0_tm.row(r);

                    int nn = inch * 4; // inch always > 0

                    v4f32 _sum0 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum1 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum2 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum3 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum4 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum5 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum6 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum7 = (v4f32)__msa_fill_w(0);

                    for (int j = 0; j < nn; j++)
                    {
                        __builtin_prefetch(r0 + 32);
                        __builtin_prefetch(k0 + 16);
                        v4i32 _val0123 = __msa_ld_w(r0, 0);
                        v4i32 _val4567 = __msa_ld_w(r0 + 4, 0);
                        v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
                        _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
                        _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
                        _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
                        _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
                        _sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0);
                        _sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0);
                        _sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0);
                        _sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0);

                        r0 += 8;
                        k0 += 4;
                    }

                    __msa_st_w((v4i32)_sum0, output0_tm, 0);
                    __msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
                    __msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0);
                    __msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0);
                    __msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0);
                    __msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0);
                    __msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0);
                    __msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0);

                    output0_tm += 4 * 8;
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
                    const float* k0 = kernel0_tm.row(r);

                    int nn = inch * 4; // inch always > 0

                    v4f32 _sum0 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum1 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum2 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum3 = (v4f32)__msa_fill_w(0);

                    for (int j = 0; j < nn; j++)
                    {
                        __builtin_prefetch(r0 + 16);
                        __builtin_prefetch(k0 + 16);
                        v4i32 _val0123 = __msa_ld_w(r0, 0);
                        v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
                        _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
                        _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
                        _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
                        _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);

                        r0 += 4;
                        k0 += 4;
                    }

                    __msa_st_w((v4i32)_sum0, output0_tm, 0);
                    __msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
                    __msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0);
                    __msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0);

                    output0_tm += 4 * 4;
                }
                for (; i + 1 < tiles; i += 2)
                {
                    const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
                    const float* k0 = kernel0_tm.row(r);

                    int nn = inch * 4; // inch always > 0

                    v4f32 _sum0 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum1 = (v4f32)__msa_fill_w(0);

                    for (int j = 0; j < nn; j++)
                    {
                        __builtin_prefetch(r0 + 8);
                        __builtin_prefetch(k0 + 16);
                        v4f32 _val0 = __msa_fill_w_f32(*r0++);
                        v4f32 _val1 = __msa_fill_w_f32(*r0++);
                        v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
                        _sum0 = __msa_fmadd_w(_sum0, _val0, _w0);
                        _sum1 = __msa_fmadd_w(_sum1, _val1, _w0);

                        k0 += 4;
                    }

                    __msa_st_w((v4i32)_sum0, output0_tm, 0);
                    __msa_st_w((v4i32)_sum1, output0_tm + 4, 0);

                    output0_tm += 4 * 2;
                }
                for (; i < tiles; i++)
                {
                    const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
                    const float* k0 = kernel0_tm.row(r);

                    int nn = inch * 4; // inch always > 0

                    v4f32 _sum = (v4f32)__msa_fill_w(0);

                    for (int j = 0; j < nn; j++)
                    {
                        __builtin_prefetch(r0 + 4);
                        __builtin_prefetch(k0 + 16);
                        v4f32 _val0 = __msa_fill_w_f32(*r0++);
                        v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
                        _sum = __msa_fmadd_w(_sum, _val0, _w0);

                        k0 += 4;
                    }

                    __msa_st_w((v4i32)_sum, output0_tm, 0);

                    output0_tm += 4;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        conv3x3s1_winograd64_transform_output_pack4_msa(top_blob_tm, top_blob_bordered, bias, opt);
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// Transform a 3x3 kernel into the 6x6 Winograd F(4x4,3x3) domain, then
// interleave it into the pack4 layout consumed by conv3x3s1_winograd42_pack4_msa.
// NOTE(review): like the winograd64 variant, assumes inch/outch % 4 == 0.
static void conv3x3s1_winograd42_transform_kernel_pack4_msa(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
    // winograd42 transform kernel
    Mat kernel_tm(6 * 6, inch, outch);

    // G matrix (6x3) of the F(4,3) kernel transform: U = G * g * G^T
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : tmp = G * g
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = tmp * G^T
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];
                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = pb-pa-inch/pa-36-outch/pb
    kernel_tm_pack4.create(inch / 4, 36, outch / 4, (size_t)4u * 4 * 4, 4 * 4);

    for (int q = 0; q + (4 - 1) < outch; q += 4)
    {
        Mat g0 = kernel_tm_pack4.channel(q / 4);

        for (int k = 0; k < 36; k++)
        {
            float* g00 = g0.row<float>(k);

            for (int p = 0; p + (4 - 1) < inch; p += 4)
            {
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);

                        g00[0] = (float)k00[k];

                        g00++;
                    }
                }
            }
        }
    }
}

// Winograd F(4x4,3x3) convolution, stride 1, pack4 layout.
// Same pipeline as conv3x3s1_winograd64_pack4_msa, but with 4x4 output tiles
// and 36 frequency channels: pad input to 4n+2, transform input, batched GEMM
// per frequency, transform output back, crop.
static void conv3x3s1_winograd42_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 4;
        int h_tiles = outh / 4;
        const int tiles = w_tiles * h_tiles;

        bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        conv3x3s1_winograd42_transform_input_pack4_msa(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = h_tm / 6 * w_tm / 6;

        // permute
        // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        Mat bottom_blob_tm2;
        // rows hold batches of 12 tiles, with the remainder split into 8/4/2/1
        if (tiles >= 12)
            bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 36; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
            for (; i + 11 < tiles; i += 12)
            {
                float* tmpptr = tm2.row(i / 12);

                const float* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 4;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 4x12
                    v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
                    v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                    v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                    v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
                    v4f32 _r5 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);
                    v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0);
                    v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0);
                    v4f32 _r8 = (v4f32)__msa_ld_w(r0 + 4 * 8, 0);
                    v4f32 _r9 = (v4f32)__msa_ld_w(r0 + 4 * 9, 0);
                    v4f32 _ra = (v4f32)__msa_ld_w(r0 + 4 * 10, 0);
                    v4f32 _rb = (v4f32)__msa_ld_w(r0 + 4 * 11, 0);

                    v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
                    v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
                    v4i32 _r89r = __msa_ilvr_w((v4i32)_r9, (v4i32)_r8);
                    v4i32 _r89l = __msa_ilvl_w((v4i32)_r9, (v4i32)_r8);
                    v4i32 _rabr = __msa_ilvr_w((v4i32)_rb, (v4i32)_ra);
                    v4i32 _rabl = __msa_ilvl_w((v4i32)_rb, (v4i32)_ra);
                    v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
                    v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
                    v2i64 _r89ab_0 = __msa_ilvr_d((v2i64)_rabr, (v2i64)_r89r);
                    v2i64 _r89ab_1 = __msa_ilvl_d((v2i64)_rabr, (v2i64)_r89r);
                    v2i64 _r89ab_2 = __msa_ilvr_d((v2i64)_rabl, (v2i64)_r89l);
                    v2i64 _r89ab_3 = __msa_ilvl_d((v2i64)_rabl, (v2i64)_r89l);

                    __msa_st_w((v4i32)_r0123_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
                    __msa_st_w((v4i32)_r89ab_0, tmpptr + 4 * 2, 0);
                    __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 3, 0);
                    __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 4, 0);
                    __msa_st_w((v4i32)_r89ab_1, tmpptr + 4 * 5, 0);
                    __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 6, 0);
                    __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 7, 0);
                    __msa_st_w((v4i32)_r89ab_2, tmpptr + 4 * 8, 0);
                    __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 9, 0);
                    __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 10, 0);
                    __msa_st_w((v4i32)_r89ab_3, tmpptr + 4 * 11, 0);

                    r0 += bottom_blob_tm.cstep * 4;
                    tmpptr += 48;
                }
            }
            for (; i + 7 < tiles; i += 8)
            {
                float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);

                const float* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 4;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 4x8
                    v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
                    v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                    v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                    v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
                    v4f32 _r5 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);
                    v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0);
                    v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0);

                    v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
                    v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
                    v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
                    v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);

                    __msa_st_w((v4i32)_r0123_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
                    __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 2, 0);
                    __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 3, 0);
                    __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 4, 0);
                    __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 5, 0);
                    __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 6, 0);
                    __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 7, 0);

                    r0 += bottom_blob_tm.cstep * 4;
                    tmpptr += 32;
                }
            }
            for (; i + 3 < tiles; i += 4)
            {
                float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);

                const float* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 4;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 4x4
                    v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
                    v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                    v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);

                    v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
                    v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);

                    __msa_st_w((v4i32)_r0123_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r0123_1, tmpptr + 4, 0);
                    __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 2, 0);
                    __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 3, 0);

                    r0 += bottom_blob_tm.cstep * 4;
                    tmpptr += 16;
                }
            }
            for (; i + 1 < tiles; i += 2)
            {
                float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);

                const float* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 4;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 4x2
                    v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);

                    v4i32 _r01_0 = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01_1 = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);

                    __msa_st_w((v4i32)_r01_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r01_1, tmpptr + 4, 0);

                    r0 += bottom_blob_tm.cstep * 4;
                    tmpptr += 8;
                }
            }
            for (; i < tiles; i++)
            {
                float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);

                const float* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 4;

                for (int q = 0; q < inch; q++)
                {
                    v4f32 _val = (v4f32)__msa_ld_w(r0, 0);
                    __msa_st_w((v4i32)_val, tmpptr, 0);

                    r0 += bottom_blob_tm.cstep * 4;
                    tmpptr += 4;
                }
            }
        }

        bottom_blob_tm = Mat();
        // permute end

        top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            float* output0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int r = 0; r < 36; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 11 < tiles; i += 12)
                {
                    const float* r0 = bb2.row(i / 12);
                    const float* k0 = kernel0_tm.row(r);

                    int nn = inch * 4; // inch always > 0

                    v4f32 _sum0 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum1 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum2 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum3 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum4 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum5 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum6 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum7 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum8 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum9 = (v4f32)__msa_fill_w(0);
                    v4f32 _suma = (v4f32)__msa_fill_w(0);
                    v4f32 _sumb = (v4f32)__msa_fill_w(0);

                    for (int j = 0; j < nn; j++)
                    {
                        __builtin_prefetch(r0 + 48);
                        __builtin_prefetch(k0 + 16);
                        v4i32 _val0123 = __msa_ld_w(r0, 0);
                        v4i32 _val4567 = __msa_ld_w(r0 + 4, 0);
                        v4i32 _val89ab = __msa_ld_w(r0 + 8, 0);
                        v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
                        _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
                        _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
                        _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
                        _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
                        _sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0);
                        _sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0);
                        _sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0);
                        _sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0);
                        _sum8 = __msa_fmadd_w(_sum8, (v4f32)__msa_splati_w(_val89ab, 0), _w0);
                        _sum9 = __msa_fmadd_w(_sum9, (v4f32)__msa_splati_w(_val89ab, 1), _w0);
                        _suma = __msa_fmadd_w(_suma, (v4f32)__msa_splati_w(_val89ab, 2), _w0);
                        _sumb = __msa_fmadd_w(_sumb, (v4f32)__msa_splati_w(_val89ab, 3), _w0);

                        r0 += 12;
                        k0 += 4;
                    }

                    __msa_st_w((v4i32)_sum0, output0_tm, 0);
                    __msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
                    __msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0);
                    __msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0);
                    __msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0);
                    __msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0);
                    __msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0);
                    __msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0);
                    __msa_st_w((v4i32)_sum8, output0_tm + 4 * 8, 0);
                    __msa_st_w((v4i32)_sum9, output0_tm + 4 * 9, 0);
                    __msa_st_w((v4i32)_suma, output0_tm + 4 * 10, 0);
                    __msa_st_w((v4i32)_sumb, output0_tm + 4 * 11, 0);

                    output0_tm += 4 * 12;
                }
                for (; i + 7 < tiles; i += 8)
                {
                    const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
                    const float* k0 = kernel0_tm.row(r);

                    int nn = inch * 4; // inch always > 0

                    v4f32 _sum0 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum1 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum2 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum3 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum4 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum5 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum6 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum7 = (v4f32)__msa_fill_w(0);

                    for (int j = 0; j < nn; j++)
                    {
                        __builtin_prefetch(r0 + 32);
                        __builtin_prefetch(k0 + 16);
                        v4i32 _val0123 = __msa_ld_w(r0, 0);
                        v4i32 _val4567 = __msa_ld_w(r0 + 4, 0);
                        v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
                        _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
                        _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
                        _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
                        _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
                        _sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0);
                        _sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0);
                        _sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0);
                        _sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0);

                        r0 += 8;
                        k0 += 4;
                    }

                    __msa_st_w((v4i32)_sum0, output0_tm, 0);
                    __msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
                    __msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0);
                    __msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0);
                    __msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0);
                    __msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0);
                    __msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0);
                    __msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0);

                    output0_tm += 4 * 8;
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
                    const float* k0 = kernel0_tm.row(r);

                    int nn = inch * 4; // inch always > 0

                    v4f32 _sum0 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum1 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum2 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum3 = (v4f32)__msa_fill_w(0);

                    for (int j = 0; j < nn; j++)
                    {
                        __builtin_prefetch(r0 + 16);
                        __builtin_prefetch(k0 + 16);
                        v4i32 _val0123 = __msa_ld_w(r0, 0);
                        v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
                        _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
                        _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
                        _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
                        _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);

                        r0 += 4;
                        k0 += 4;
                    }

                    __msa_st_w((v4i32)_sum0, output0_tm, 0);
                    __msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
                    __msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0);
                    __msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0);

                    output0_tm += 4 * 4;
                }
                for (; i + 1 < tiles; i += 2)
                {
                    const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
                    const float* k0 = kernel0_tm.row(r);

                    int nn = inch * 4; // inch always > 0

                    v4f32 _sum0 = (v4f32)__msa_fill_w(0);
                    v4f32 _sum1 = (v4f32)__msa_fill_w(0);

                    for (int j = 0; j < nn; j++)
                    {
                        __builtin_prefetch(r0 + 8);
                        __builtin_prefetch(k0 + 16);
                        v4f32 _val0 = __msa_fill_w_f32(*r0++);
                        v4f32 _val1 = __msa_fill_w_f32(*r0++);
                        v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
                        _sum0 = __msa_fmadd_w(_sum0, _val0, _w0);
                        _sum1 = __msa_fmadd_w(_sum1, _val1, _w0);

                        k0 += 4;
                    }

                    __msa_st_w((v4i32)_sum0, output0_tm, 0);
                    __msa_st_w((v4i32)_sum1, output0_tm + 4, 0);

                    output0_tm += 4 * 2;
                }
                for (; i < tiles; i++)
                {
                    const float* r0 = bb2.row<const float>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
                    const float* k0 = kernel0_tm.row<const float>(r);

                    int nn = inch * 4; // inch always > 0

                    v4f32 _sum = (v4f32)__msa_fill_w(0);

                    for (int j = 0; j < nn; j++)
                    {
                        __builtin_prefetch(r0 + 4);
                        __builtin_prefetch(k0 + 16);
                        v4f32 _val0 = __msa_fill_w_f32(*r0++);
                        v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
                        _sum = __msa_fmadd_w(_sum, _val0, _w0);

                        k0 += 4;
                    }

                    __msa_st_w((v4i32)_sum, output0_tm, 0);

                    output0_tm += 4;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        conv3x3s1_winograd42_transform_output_pack4_msa(top_blob_tm, top_blob_bordered, bias, opt);
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
lrthresh.c
/* Copyright 2015. The Regents of the University of California.
 * Copyright 2015. Tao Zhang and Joseph Cheng.
 * Copyright 2016-2018. Martin Uecker.
 * All rights reserved. Use of this source code is governed by
 * a BSD-style license which can be found in the LICENSE file.
 *
 * Authors:
 * 2014-2015 Frank Ong <frankong@berkeley.edu>
 * 2014 Tao Zhang
 * 2014 Joseph Cheng
 * 2014 Jon Tamir
 * 2014-2018 Martin Uecker
 */

#include <stdlib.h>
#include <complex.h>
#include <math.h>

#include "misc/misc.h"
#include "misc/mri.h"
#include "misc/debug.h"

#include "num/multind.h"
#include "num/flpmath.h"
#include "num/linalg.h"
#include "num/ops.h"
#include "num/ops_p.h"
#include "num/blockproc.h"
#include "num/casorati.h"

#include "iter/thresh.h"

#include "lowrank/batchsvd.h"
#include "lowrank/svthresh.h"

#include "lrthresh.h"


/* State shared by all callbacks of the low-rank thresholding operator. */
struct lrthresh_data_s {

	INTERFACE(operator_data_t);

	float lambda;		// base regularization weight (scaled by mu at apply time)
	bool randshift;		// apply a random circular shift per level before blocking
	bool noise;		// treat the last level as one full-image block
	int remove_mean;

	long strs_lev[DIMS];	// strides of the decomposition (includes LEVEL_DIM)
	long strs[DIMS];	// strides of a single level (image dims)
	long dims_decom[DIMS];	// dimensions including LEVEL_DIM
	long dims[DIMS];	// image dimensions (LEVEL_DIM selected out)

	unsigned long mflags;	// dims folded into the matrix row dimension
	unsigned long flags;	// NOTE(review): never set by lrthresh_create_data — confirm unused
	long levels;		// number of decomposition levels (= dims_decom[LEVEL_DIM])
	long blkdims[MAX_LEV][DIMS];	// per-level block dimensions
	bool overlapping_blocks;	// overlapping (casorati) vs. disjoint (basorati) blocks
};

static DEF_TYPEID(lrthresh_data_s);

static struct lrthresh_data_s* lrthresh_create_data(const long dims_decom[DIMS], bool randshift, unsigned long mflags, const long blkdims[MAX_LEV][DIMS], float lambda, bool noise, int remove_mean, bool overlapping_blocks);
static void lrthresh_free_data(const operator_data_t* data);
static void lrthresh_apply(const operator_data_t* _data, float lambda, complex float* dst, const complex float* src);



/**
 * Initialize lrthresh operator
 *
 * @param dims_lev - decomposition dimensions (with levels at LEVEL_DIM)
 * @param randshift - randshift boolean
 * @param mflags - selects which dimensions gets reshaped as the first dimension in matrix
 * @param blkdims - contains block dimensions for all levels
 *
 */
const struct operator_p_s* lrthresh_create(const long dims_lev[DIMS], bool randshift, unsigned long mflags, const long blkdims[MAX_LEV][DIMS], float lambda, bool noise, int remove_mean, bool overlapping_blocks)
{
	struct lrthresh_data_s* data = lrthresh_create_data(dims_lev, randshift, mflags, blkdims, lambda, noise, remove_mean, overlapping_blocks);

	// proximal operator: input and output share dims_lev
	return operator_p_create(DIMS, dims_lev, DIMS, dims_lev, CAST_UP(data), lrthresh_apply, lrthresh_free_data);
}



/**
 * Initialize lrthresh data
 *
 * @param dims_decom - dimensions with levels at LEVEL_DIM
 * @param randshift - randshift boolean
 * @param mflags - selects which dimensions gets reshaped as the first dimension in matrix
 * @param blkdims - contains block dimensions for all levels
 *
 */
static struct lrthresh_data_s* lrthresh_create_data(const long dims_decom[DIMS], bool randshift, unsigned long mflags, const long blkdims[MAX_LEV][DIMS], float lambda, bool noise, int remove_mean, bool overlapping_blocks)
{
	PTR_ALLOC(struct lrthresh_data_s, data);
	SET_TYPEID(lrthresh_data_s, data);

	data->randshift = randshift;
	data->mflags = mflags;
	data->lambda = lambda;
	data->noise = noise;
	data->remove_mean = remove_mean;
	data->overlapping_blocks = overlapping_blocks;

	// level dimensions
	md_copy_dims(DIMS, data->dims_decom, dims_decom);
	md_calc_strides(DIMS, data->strs_lev, dims_decom, CFL_SIZE);

	// image dimensions (levels stripped out)
	data->levels = dims_decom[LEVEL_DIM];
	md_select_dims(DIMS, ~LEVEL_FLAG, data->dims, dims_decom);
	md_calc_strides(DIMS, data->strs, data->dims, CFL_SIZE);

	// blkdims: copy the per-level block sizes
	for(long l = 0; l < data->levels; l++) {

		for (long i = 0; i < DIMS; i++)
			data->blkdims[l][i] = blkdims[l][i];
	}

	return PTR_PASS(data);
}



/**
 * Free lrthresh operator data
 */
static void lrthresh_free_data(const operator_data_t* _data)
{
	xfree(CAST_DOWN(lrthresh_data_s, _data));
}



/*
 * Return a random number between 0 and limit inclusive.
 */
static int rand_lim(int limit)
{
	// rejection sampling avoids modulo bias; note divisor is 0 if limit+1 > RAND_MAX
	int divisor = RAND_MAX / (limit + 1);
	int retval;

	do {
		retval = rand() / divisor;

	} while (retval > limit);

	return retval;
}



/*
 * Low rank thresholding for arbitrary block sizes
 */
static void lrthresh_apply(const operator_data_t* _data, float mu, complex float* dst, const complex float* src)
{
	auto data = CAST_DOWN(lrthresh_data_s, _data);

	// effective threshold: step size times base weight
	float lambda = mu * data->lambda;

	long strs1[DIMS];
	md_calc_strides(DIMS, strs1, data->dims_decom, 1);	// element (not byte) strides

	//#pragma omp parallel for
	for (int l = 0; l < data->levels; l++) {

		// pointers to this level's slice of the decomposition
		complex float* dstl = dst + l * strs1[LEVEL_DIM];
		const complex float* srcl = src + l * strs1[LEVEL_DIM];

		long blkdims[DIMS];
		long shifts[DIMS];
		long unshifts[DIMS];
		long zpad_dims[DIMS];
		long M = 1;

		for (unsigned int i = 0; i < DIMS; i++) {

			blkdims[i] = data->blkdims[l][i];

			// pad each dimension up to a whole number of blocks
			zpad_dims[i] = (data->dims[i] + blkdims[i] - 1) / blkdims[i];
			zpad_dims[i] *= blkdims[i];

			if (MD_IS_SET(data->mflags, i))
				M *= blkdims[i];

			// optional random circular shift per level (undone at the end)
			if (data->randshift)
				shifts[i] = rand_lim(MIN(blkdims[i] - 1, zpad_dims[i] - blkdims[i]));
			else
				shifts[i] = 0;

			unshifts[i] = -shifts[i];
		}

		long zpad_strs[DIMS];
		md_calc_strides(DIMS, zpad_strs, zpad_dims, CFL_SIZE);

		long blk_size = md_calc_size(DIMS, blkdims);
		long img_size = md_calc_size(DIMS, zpad_dims);

		long N = blk_size / M;		// columns of each block matrix
		long B = img_size / blk_size;	// number of (non-overlapping) blocks

		// in noise mode the final level is one full-image block
		if (data->noise && (l == data->levels - 1)) {

			M = img_size;
			N = 1;
			B = 1;
		}

		// 1. extend into the padded array and apply the random shift
		complex float* tmp = md_alloc_sameplace(DIMS, zpad_dims, CFL_SIZE, dst);

		md_circ_ext(DIMS, zpad_dims, tmp, data->dims, srcl, CFL_SIZE);

		md_circ_shift(DIMS, zpad_dims, shifts, tmp, tmp, CFL_SIZE);

		long mat_dims[2];
		(data->overlapping_blocks ? casorati_dims : basorati_dims)(DIMS, mat_dims, blkdims, zpad_dims);

		complex float* tmp_mat = md_alloc_sameplace(2, mat_dims, CFL_SIZE, dst);
		complex float* tmp_mat2 = tmp_mat;

		// Reshape image into a blk_size x number of blocks matrix
		(data->overlapping_blocks ? casorati_matrix : basorati_matrix)(DIMS, blkdims, mat_dims, tmp_mat, zpad_dims, zpad_strs, tmp);

		long num_blocks = mat_dims[1];
		long mat2_dims[2] = { mat_dims[0], mat_dims[1] };

		// FIXME: casorati and basorati are transposes of each other
		if (data->overlapping_blocks) {

			mat2_dims[0] = mat_dims[1];
			mat2_dims[1] = mat_dims[0];

			tmp_mat2 = md_alloc_sameplace(2, mat2_dims, CFL_SIZE, dst);

			md_transpose(2, 0, 1, mat2_dims, tmp_mat2, mat_dims, tmp_mat, CFL_SIZE);

			num_blocks = mat2_dims[1];

			if (B > 1)
				B = mat2_dims[1];
		}

		// NOTE(review): M, N, B, num_blocks, img_size, blk_size are long but
		// the format uses %d — should be %ld on LP64 targets; confirm and fix
		debug_printf(DP_DEBUG4, "M=%d, N=%d, B=%d, num_blocks=%d, img_size=%d, blk_size=%d\n", M, N, B, num_blocks, img_size, blk_size);

		// 2. singular-value thresholding of each MxN block
		batch_svthresh(M, N, num_blocks, lambda * GWIDTH(M, N, B), *(complex float (*)[mat2_dims[1]][M][N])tmp_mat2);

		//	for ( int b = 0; b < mat_dims[1]; b++ )
		//		svthresh(M, N, lambda * GWIDTH(M, N, B), tmp_mat, tmp_mat);

		if (data->overlapping_blocks) {

			md_transpose(2, 0, 1, mat_dims, tmp_mat, mat2_dims, tmp_mat2, CFL_SIZE);
		}

		// 3. scatter the thresholded block matrix back into the image
		(data->overlapping_blocks ? casorati_matrixH : basorati_matrixH)(DIMS, blkdims, zpad_dims, zpad_strs, tmp, mat_dims, tmp_mat);

		if (data->overlapping_blocks) {

			// presumably each voxel was accumulated M times by
			// casorati_matrixH, hence the 1/M — TODO confirm
			md_zsmul(DIMS, zpad_dims, tmp, tmp, 1. / M);
			md_free(tmp_mat2);
		}

		// 4. undo the shift and crop back to the original dimensions
		md_circ_shift(DIMS, zpad_dims, unshifts, tmp, tmp, CFL_SIZE);

		md_resize(DIMS, data->dims, dstl, zpad_dims, tmp, CFL_SIZE);

		md_free(tmp);
		md_free(tmp_mat);
	}
}



/*
 * Nuclear norm calculation for arbitrary block sizes
 */
float lrnucnorm(const struct operator_p_s* op, const complex float* src)
{
	struct lrthresh_data_s* data = (struct lrthresh_data_s*)operator_p_get_data(op);

	long strs1[DIMS];
	md_calc_strides(DIMS, strs1, data->dims_decom, 1);

	float nnorm = 0.;

	for (int l = 0; l < data->levels; l++) {

		const complex float* srcl = src + l * strs1[LEVEL_DIM];

		long blkdims[DIMS];
		long blksize = 1;

		for (unsigned int i = 0; i < DIMS; i++) {

			blkdims[i] = data->blkdims[l][i];
			blksize *= blkdims[i];
		}

		// 1x...x1 blocks: nuclear norm degenerates to twice the l1-norm
		if (1 == blksize) {

			for (long j = 0; j < md_calc_size(DIMS, data->dims); j++)
				nnorm += 2 * cabsf(srcl[j]);

			continue;
		}

		struct svthresh_blockproc_data* svdata = svthresh_blockproc_create(data->mflags, 0., 0);

		complex float* tmp = md_alloc_sameplace(DIMS, data->dims, CFL_SIZE, src);

		//debug_print_dims(DP_DEBUG1, DIMS, data->dims);
		md_copy(DIMS, data->dims, tmp, srcl, CFL_SIZE);

		// Block SVD Threshold
		// NOTE(review): overwrites (=) rather than accumulates (+=) across
		// levels, unlike the l1 branch above — looks like a bug; confirm
		nnorm = blockproc(DIMS, data->dims, blkdims, (void*)svdata, nucnorm_blockproc, tmp, tmp);

		xfree(svdata);
		md_free(tmp);
	}

	return nnorm;
}



/*************
 * Block dimensions functions
 *************/

/**
 * Generates multiscale low rank block sizes
 *
 * @param blkdims - block sizes to be written
 * @param flags - specifies which dimensions to do the blocks.
The other dimensions will be the same as input * @param idims - input dimensions * @param blkskip - scale each level by blkskip to generate the next level * * returns number of levels */ long multilr_blkdims(long blkdims[MAX_LEV][DIMS], unsigned long flags, const long idims[DIMS], int blkskip, long initblk) { // Multiscale low rank block sizes long tmp_block[DIMS]; for (unsigned int i = 0; i < DIMS; i++) { if (MD_IS_SET(flags, i)) tmp_block[i] = MIN(initblk, idims[i]); else tmp_block[i] = idims[i]; } bool done; // Loop block_sizes long levels = 0; do { levels++; debug_printf(DP_INFO, "[\t"); for (unsigned int i = 0; i < DIMS; i++) { blkdims[levels - 1][i] = tmp_block[i]; debug_printf(DP_INFO, "%ld\t", blkdims[levels-1][i]); } debug_printf(DP_INFO, "]\n"); done = true; for (unsigned int i = 0; i < DIMS; i++) { if (MD_IS_SET(flags, i) && (idims[i] != 1)) { tmp_block[i] = MIN(tmp_block[i] * blkskip, idims[i]); done = done && (blkdims[levels - 1][i] == idims[i]); } } } while(!done); return levels; } void add_lrnoiseblk(long* levels, long blkdims[MAX_LEV][DIMS], const long idims[DIMS]) { levels[0]++; debug_printf(DP_DEBUG1, "[\t"); for (unsigned int i = 0; i < DIMS; i++) { blkdims[levels[0] - 1][i] = idims[i]; debug_printf(DP_DEBUG1, "%ld\t", blkdims[levels[0] - 1][i]); } debug_printf(DP_DEBUG1, "]\n"); } /** * Generates locally low rank block sizes * * @param blkdims - block sizes to be written * @param flags - specifies which dimensions to do the blocks. 
The other dimensions will be the same as input * @param idims - input dimensions * @param llkblk - the block size * * returns number of levels = 1 */ long llr_blkdims(long blkdims[MAX_LEV][DIMS], unsigned long flags, const long idims[DIMS], long llrblk) { for (unsigned int i = 0; i < DIMS; i++) { if (MD_IS_SET(flags, i)) blkdims[0][i] = MIN(llrblk, idims[i]); else blkdims[0][i] = idims[i]; } return 1; } /** * Generates low rank + sparse block sizes * * @param blkdims - block sizes to be written * @param idims - input dimensions * * returns number of levels = 2 */ long ls_blkdims(long blkdims[MAX_LEV][DIMS], const long idims[DIMS]) { for (unsigned int i = 0; i < DIMS; i++) { blkdims[0][i] = 1; blkdims[1][i] = idims[i]; } return 2; } float get_lrthresh_lambda(const struct operator_p_s* o) { auto data = CAST_DOWN(lrthresh_data_s, operator_p_get_data(o)); return data->lambda; }
HYPRE_IJMatrix.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * HYPRE_IJMatrix interface
 *
 *****************************************************************************/

#include "./_hypre_IJ_mv.h"
#include "../HYPRE.h"

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixCreate
 *
 * Allocates an empty IJ matrix whose local rows are [ilower, iupper] and
 * local columns [jlower, jupper], derives the global row/column
 * partitionings, and returns the handle in *matrix.  On invalid bounds the
 * partially created object is freed and an error is recorded.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixCreate( MPI_Comm comm,
                      HYPRE_BigInt ilower,
                      HYPRE_BigInt iupper,
                      HYPRE_BigInt jlower,
                      HYPRE_BigInt jupper,
                      HYPRE_IJMatrix *matrix )
{
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   HYPRE_BigInt *info;
   HYPRE_Int num_procs;
   HYPRE_Int myid;

   hypre_IJMatrix *ijmatrix;

#ifdef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_BigInt row0, col0, rowN, colN;
#else
   HYPRE_BigInt *recv_buf;
   HYPRE_Int i, i4;
   HYPRE_Int square;
#endif

   ijmatrix = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST);

   hypre_IJMatrixComm(ijmatrix) = comm;
   hypre_IJMatrixObject(ijmatrix) = NULL;
   hypre_IJMatrixTranslator(ijmatrix) = NULL;
   hypre_IJMatrixAssumedPart(ijmatrix) = NULL;
   hypre_IJMatrixObjectType(ijmatrix) = HYPRE_UNITIALIZED;
   hypre_IJMatrixAssembleFlag(ijmatrix) = 0;
   hypre_IJMatrixPrintLevel(ijmatrix) = 0;
   hypre_IJMatrixOMPFlag(ijmatrix) = 0;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm, &myid);

   /* empty local ranges are encoded as upper == lower - 1, hence the +1 slack */
   if (ilower > iupper+1 || ilower < 0)
   {
      hypre_error_in_arg(2);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

   if (iupper < -1)
   {
      hypre_error_in_arg(3);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

   if (jlower > jupper+1 || jlower < 0)
   {
      hypre_error_in_arg(4);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

   if (jupper < -1)
   {
      hypre_error_in_arg(5);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Each rank stores only its own two-entry [begin, end) pair; global
      extents come from rank 0 and rank num_procs-1 via two broadcasts. */
   info = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);

   row_partitioning = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   col_partitioning = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);

   row_partitioning[0] = ilower;
   row_partitioning[1] = iupper+1;
   col_partitioning[0] = jlower;
   col_partitioning[1] = jupper+1;

   /* now we need the global number of rows and columns as well as
      the global first row and column index */

   /* proc 0 has the first row and col */
   if (myid==0)
   {
      info[0] = ilower;
      info[1] = jlower;
   }
   hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, 0, comm);

   row0 = info[0];
   col0 = info[1];

   /* proc (num_procs-1) has the last row and col */
   if (myid == (num_procs-1))
   {
      info[0] = iupper;
      info[1] = jupper;
   }
   hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, num_procs-1, comm);

   rowN = info[0];
   colN = info[1];

   hypre_IJMatrixGlobalFirstRow(ijmatrix) = row0;
   hypre_IJMatrixGlobalFirstCol(ijmatrix) = col0;
   hypre_IJMatrixGlobalNumRows(ijmatrix) = rowN - row0 + 1;
   hypre_IJMatrixGlobalNumCols(ijmatrix) = colN - col0 + 1;

   hypre_TFree(info, HYPRE_MEMORY_HOST);
#else
   /* Full partitioning: gather every rank's (ilower,iupper,jlower,jupper)
      and stitch them into global row/col partition arrays. */
   info = hypre_CTAlloc(HYPRE_BigInt, 4, HYPRE_MEMORY_HOST);
   recv_buf = hypre_CTAlloc(HYPRE_BigInt, 4*num_procs, HYPRE_MEMORY_HOST);
   row_partitioning = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);

   info[0] = ilower;
   info[1] = iupper;
   info[2] = jlower;
   info[3] = jupper;

   /* Generate row- and column-partitioning through information exchange
      across all processors, check whether the matrix is square, and if the
      partitionings match, i.e. no overlaps or gaps; if there are overlaps
      or gaps in the row or column partitioning, an error is recorded */

   hypre_MPI_Allgather(info,4,HYPRE_MPI_BIG_INT,recv_buf,4,HYPRE_MPI_BIG_INT,comm);

   row_partitioning[0] = recv_buf[0];
   square = 1;

   for (i=0; i < num_procs-1; i++)
   {
      i4 = 4*i;

      /* rank i's last row must immediately precede rank i+1's first row */
      if ( recv_buf[i4+1] != (recv_buf[i4+4]-1) )
      {
         hypre_error(HYPRE_ERROR_GENERIC);
         hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
         hypre_TFree(info, HYPRE_MEMORY_HOST);
         hypre_TFree(recv_buf, HYPRE_MEMORY_HOST);
         hypre_TFree(row_partitioning, HYPRE_MEMORY_HOST);
         return hypre_error_flag;
      }
      else
      {
         row_partitioning[i+1] = recv_buf[i4+4];
      }

      /* square iff every rank's row range equals its column range */
      if ((square && (recv_buf[i4] != recv_buf[i4+2])) || (recv_buf[i4+1] != recv_buf[i4+3]) )
      {
         square = 0;
      }
   }

   i4 = (num_procs-1)*4;
   row_partitioning[num_procs] = recv_buf[i4+1]+1;

   if ((recv_buf[i4] != recv_buf[i4+2]) || (recv_buf[i4+1] != recv_buf[i4+3]))
   {
      square = 0;
   }

   if (square)
   {
      /* square matrix: rows and columns share one array (freed only once) */
      col_partitioning = row_partitioning;
   }
   else
   {
      col_partitioning = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
      col_partitioning[0] = recv_buf[2];

      for (i=0; i < num_procs-1; i++)
      {
         i4 = 4*i;

         if (recv_buf[i4+3] != recv_buf[i4+6]-1)
         {
            hypre_error(HYPRE_ERROR_GENERIC);
            hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
            hypre_TFree(info, HYPRE_MEMORY_HOST);
            hypre_TFree(recv_buf, HYPRE_MEMORY_HOST);
            hypre_TFree(row_partitioning, HYPRE_MEMORY_HOST);
            hypre_TFree(col_partitioning, HYPRE_MEMORY_HOST);
            return hypre_error_flag;
         }
         else
         {
            col_partitioning[i+1] = recv_buf[i4+6];
         }
      }

      col_partitioning[num_procs] = recv_buf[num_procs*4-1]+1;
   }

   hypre_IJMatrixGlobalFirstRow(ijmatrix) = row_partitioning[0];
   hypre_IJMatrixGlobalFirstCol(ijmatrix) = col_partitioning[0];
   hypre_IJMatrixGlobalNumRows(ijmatrix) = row_partitioning[num_procs] - row_partitioning[0];
   hypre_IJMatrixGlobalNumCols(ijmatrix) = col_partitioning[num_procs] - col_partitioning[0];

   hypre_TFree(info, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_buf, HYPRE_MEMORY_HOST);
#endif

   hypre_IJMatrixRowPartitioning(ijmatrix) = row_partitioning;
   hypre_IJMatrixColPartitioning(ijmatrix) = col_partitioning;

   *matrix = (HYPRE_IJMatrix) ijmatrix;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixDestroy
 *
 * Frees the IJ wrapper, its partitionings, any assumed partition, and the
 * underlying ParCSR object (when that is the object type).
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixDestroy( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (ijmatrix)
   {
      /* row and col partitionings may alias (square case) — free only once */
      if (hypre_IJMatrixRowPartitioning(ijmatrix) == hypre_IJMatrixColPartitioning(ijmatrix))
      {
         hypre_TFree(hypre_IJMatrixRowPartitioning(ijmatrix), HYPRE_MEMORY_HOST);
      }
      else
      {
         hypre_TFree(hypre_IJMatrixRowPartitioning(ijmatrix), HYPRE_MEMORY_HOST);
         hypre_TFree(hypre_IJMatrixColPartitioning(ijmatrix), HYPRE_MEMORY_HOST);
      }

      /* NOTE(review): this compiles only because the accessor macro expands
         to a fully parenthesized expression; conventional form would be
         if (hypre_IJMatrixAssumedPart(ijmatrix)) */
      if hypre_IJMatrixAssumedPart(ijmatrix)
      {
         hypre_AssumedPartitionDestroy((hypre_IJAssumedPart*)hypre_IJMatrixAssumedPart(ijmatrix));
      }

      if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
      {
         hypre_IJMatrixDestroyParCSR( ijmatrix );
      }
      else if ( hypre_IJMatrixObjectType(ijmatrix) != -1 )
      {
         hypre_error_in_arg(1);
         return hypre_error_flag;
      }
   }

   hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixInitialize
 *
 * Prepares the matrix for coefficient input; only the ParCSR object type
 * is supported.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixInitialize( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      hypre_IJMatrixInitializeParCSR( ijmatrix ) ;
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixSetPrintLevel
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixSetPrintLevel( HYPRE_IJMatrix matrix, HYPRE_Int
print_level ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_IJMatrixPrintLevel(ijmatrix) = 1; return hypre_error_flag; } /*-------------------------------------------------------------------------- * This is a helper routine to compute a prefix sum of integer values. * * The current implementation is okay for modest numbers of threads. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_PrefixSumInt(HYPRE_Int nvals, HYPRE_Int *vals, HYPRE_Int *sums) { HYPRE_Int j, nthreads, bsize; nthreads = hypre_NumThreads(); bsize = (nvals + nthreads - 1) / nthreads; /* This distributes the remainder */ if (nvals < nthreads || bsize == 1) { sums[0] = 0; for (j=1; j < nvals; j++) sums[j] += sums[j-1] + vals[j-1]; } else { /* Compute preliminary partial sums (in parallel) within each interval */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < nvals; j += bsize) { HYPRE_Int i, n = hypre_min((j+bsize), nvals); sums[0] = 0; for (i = j+1; i < n; i++) { sums[i] = sums[i-1] + vals[i-1]; } } /* Compute final partial sums (in serial) for the first entry of every interval */ for (j = bsize; j < nvals; j += bsize) { sums[j] = sums[j-bsize] + sums[j-1] + vals[j-1]; } /* Compute final partial sums (in parallel) for the remaining entries */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE #endif for (j = bsize; j < nvals; j += bsize) { HYPRE_Int i, n = hypre_min((j+bsize), nvals); for (i = j+1; i < n; i++) { sums[i] += sums[j]; } } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetValues( HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int *ncols, const HYPRE_BigInt *rows, const HYPRE_BigInt *cols, const HYPRE_Complex *values ) 
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
   HYPRE_Int *row_indexes;

   /* setting zero rows is a no-op, not an error */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* NOTE(review): unlike HYPRE_IJMatrixAddToValues below, no (nrows < 0)
      check here — confirm whether that asymmetry is intentional */
   if (!ncols)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }

   if (!rows)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   if (!cols)
   {
      hypre_error_in_arg(5);
      return hypre_error_flag;
   }

   if (!values)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Compute row_indexes and call Values2 routine (TODO: add OpenMP)*/
   row_indexes = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
   hypre_PrefixSumInt(nrows, ncols, row_indexes);

   HYPRE_IJMatrixSetValues2(matrix, nrows, ncols, rows, row_indexes, cols, values);

   hypre_TFree(row_indexes, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixSetConstantValues
 *
 * Sets every stored coefficient to the given value (ParCSR only).
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixSetConstantValues( HYPRE_IJMatrix matrix, HYPRE_Complex value)
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      return( hypre_IJMatrixSetConstantValuesParCSR( ijmatrix, value));
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixAddToValues
 *
 * Adds to (rather than overwrites) coefficients, with the same packed
 * cols/values layout as HYPRE_IJMatrixSetValues.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixAddToValues( HYPRE_IJMatrix matrix, HYPRE_Int nrows,
                           HYPRE_Int *ncols, const HYPRE_BigInt *rows,
                           const HYPRE_BigInt *cols,
                           const HYPRE_Complex *values )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
   HYPRE_Int *row_indexes;

   /* adding zero rows is a no-op, not an error */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   if (!ncols)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }

   if (!rows)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   if (!cols)
   {
      hypre_error_in_arg(5);
      return hypre_error_flag;
   }

   if (!values)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Compute row_indexes and call Values2 routine */
   row_indexes = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
   hypre_PrefixSumInt(nrows, ncols, row_indexes);

   HYPRE_IJMatrixAddToValues2(matrix, nrows, ncols, rows, row_indexes, cols, values);

   hypre_TFree(row_indexes, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixSetValues2
 *
 * Like SetValues, but the caller supplies row_indexes: the offset of each
 * row's data within cols/values, which allows gaps between rows.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixSetValues2( HYPRE_IJMatrix matrix, HYPRE_Int nrows,
                          HYPRE_Int *ncols, const HYPRE_BigInt *rows,
                          const HYPRE_Int *row_indexes,
                          const HYPRE_BigInt *cols,
                          const HYPRE_Complex *values )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   if (!ncols)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }

   if (!rows)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   if (!row_indexes)
   {
      hypre_error_in_arg(5);
      return hypre_error_flag;
   }

   if (!cols)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }

   if (!values)
   {
      hypre_error_in_arg(7);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* dispatch to the OpenMP variant when the matrix's OMP flag is set */
   if (hypre_IJMatrixOMPFlag(ijmatrix))
   {
      hypre_IJMatrixSetValuesOMPParCSR(ijmatrix, nrows, ncols, rows, row_indexes, cols, values);
   }
   else
   {
      hypre_IJMatrixSetValuesParCSR(ijmatrix, nrows, ncols, rows, row_indexes, cols, values);
   }
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixAddToValues2
 *
 * Add-to counterpart of HYPRE_IJMatrixSetValues2: caller supplies explicit
 * per-row offsets (row_indexes) into cols/values.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixAddToValues2( HYPRE_IJMatrix matrix, HYPRE_Int nrows,
                            HYPRE_Int *ncols, const HYPRE_BigInt *rows,
                            const HYPRE_Int *row_indexes,
                            const HYPRE_BigInt *cols,
                            const HYPRE_Complex *values )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   /* adding zero rows is a no-op, not an error */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   if (!ncols)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }

   if (!rows)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   if (!row_indexes)
   {
      hypre_error_in_arg(5);
      return hypre_error_flag;
   }

   if (!cols)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }

   if (!values)
   {
      hypre_error_in_arg(7);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* dispatch to the OpenMP variant when the matrix's OMP flag is set */
   if (hypre_IJMatrixOMPFlag(ijmatrix))
   {
      hypre_IJMatrixAddToValuesOMPParCSR(ijmatrix, nrows, ncols, rows, row_indexes, cols, values);
   }
   else
   {
      hypre_IJMatrixAddToValuesParCSR(ijmatrix, nrows, ncols, rows, row_indexes, cols, values);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixAssemble
 *
 * Finalizes the matrix after all Set/AddTo calls (ParCSR only).
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixAssemble( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      return( hypre_IJMatrixAssembleParCSR( ijmatrix ) );
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixGetRowCounts
 *
 * For each listed row, returns the number of stored entries in ncols.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixGetRowCounts( HYPRE_IJMatrix matrix,
                            HYPRE_Int nrows,
                            HYPRE_BigInt *rows,
                            HYPRE_Int *ncols )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   if (!rows)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }

   if (!ncols)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      hypre_IJMatrixGetRowCountsParCSR( ijmatrix, nrows, rows, ncols );
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixGetValues
 *
 * Reads coefficients back out of the matrix (ParCSR only).
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixGetValues( HYPRE_IJMatrix matrix,
                         HYPRE_Int nrows,
                         HYPRE_Int *ncols,
                         HYPRE_BigInt *rows,
                         HYPRE_BigInt *cols,
                         HYPRE_Complex *values )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (!ncols)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }

   if (!rows)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   if (!cols)
   {
      hypre_error_in_arg(5);
      return hypre_error_flag;
   }

   if (!values)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      hypre_IJMatrixGetValuesParCSR( ijmatrix, nrows, ncols, rows, cols, values );
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixSetObjectType
 *
 * Records the underlying storage type (e.g. HYPRE_PARCSR) on the wrapper.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixSetObjectType( HYPRE_IJMatrix matrix, HYPRE_Int type )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixObjectType(ijmatrix) = type;

   return
hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixGetObjectType
 *
 * Returns the object type previously set on the wrapper.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixGetObjectType( HYPRE_IJMatrix matrix, HYPRE_Int *type )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *type = hypre_IJMatrixObjectType(ijmatrix);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixGetLocalRange
 *
 * Returns this process's row range [*ilower, *iupper] and column range
 * [*jlower, *jupper] from the stored partitionings.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixGetLocalRange( HYPRE_IJMatrix matrix,
                             HYPRE_BigInt *ilower,
                             HYPRE_BigInt *iupper,
                             HYPRE_BigInt *jlower,
                             HYPRE_BigInt *jupper )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
   MPI_Comm comm;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   HYPRE_Int my_id;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   comm = hypre_IJMatrixComm(ijmatrix);
   row_partitioning = hypre_IJMatrixRowPartitioning(ijmatrix);
   col_partitioning = hypre_IJMatrixColPartitioning(ijmatrix);

   hypre_MPI_Comm_rank(comm, &my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* each rank stores only its own two-entry [begin, end) pair */
   *ilower = row_partitioning[0];
   *iupper = row_partitioning[1]-1;
   *jlower = col_partitioning[0];
   *jupper = col_partitioning[1]-1;
#else
   /* full partition array: index by this rank's id */
   *ilower = row_partitioning[my_id];
   *iupper = row_partitioning[my_id+1]-1;
   *jlower = col_partitioning[my_id];
   *jupper = col_partitioning[my_id+1]-1;
#endif

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

/**
Returns a pointer to an underlying ijmatrix type used to implement IJMatrix.
Assumes that the implementation has an underlying matrix, so it would not
work with a direct implementation of IJMatrix.

@return integer error code
@param IJMatrix [IN]
The ijmatrix to be pointed to.
*/

HYPRE_Int
HYPRE_IJMatrixGetObject( HYPRE_IJMatrix matrix, void **object )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *object = hypre_IJMatrixObject( ijmatrix );

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixSetRowSizes
 *
 * Forwards per-row size hints to the ParCSR implementation (ParCSR only).
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixSetRowSizes( HYPRE_IJMatrix matrix, const HYPRE_Int *sizes )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      return( hypre_IJMatrixSetRowSizesParCSR( ijmatrix , sizes ) );
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixSetDiagOffdSizes
 *
 * Forwards separate diagonal-block / off-diagonal-block per-row size hints
 * to the ParCSR implementation (ParCSR only).
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixSetDiagOffdSizes( HYPRE_IJMatrix matrix,
                                const HYPRE_Int *diag_sizes,
                                const HYPRE_Int *offdiag_sizes )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      hypre_IJMatrixSetDiagOffdSizesParCSR( ijmatrix, diag_sizes, offdiag_sizes );
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixSetMaxOffProcElmts
 *
 * Forwards the expected number of off-process entries to the ParCSR
 * implementation (ParCSR only).
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixSetMaxOffProcElmts( HYPRE_IJMatrix matrix,
                                  HYPRE_Int max_off_proc_elmts)
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      return( hypre_IJMatrixSetMaxOffProcElmtsParCSR(ijmatrix, max_off_proc_elmts) );
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixRead
 *
 * Read one per-rank matrix file "<filename>.<rank>" (header line: global
 * ilower iupper jlower jupper, then "I J value" triples), assemble it, and
 * return the new IJ matrix in *matrix_ptr.  Entries outside this rank's row
 * range are added (AddToValues) rather than set, matching how they will be
 * communicated at assembly.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixRead( const char     *filename,
                    MPI_Comm        comm,
                    HYPRE_Int       type,
                    HYPRE_IJMatrix *matrix_ptr )
{
   HYPRE_IJMatrix  matrix;
   HYPRE_BigInt    ilower, iupper, jlower, jupper;
   HYPRE_BigInt    I, J;
   HYPRE_Int       ncols;
   HYPRE_Complex   value;
   HYPRE_Int       myid, ret;
   char            new_filename[255];
   FILE           *file;

   hypre_MPI_Comm_rank(comm, &myid);

   hypre_sprintf(new_filename,"%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "r")) == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Header: this rank's global row/column extents. */
   hypre_fscanf(file, "%b %b %b %b", &ilower, &iupper, &jlower, &jupper);

   HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &matrix);

   HYPRE_IJMatrixSetObjectType(matrix, type);
   HYPRE_IJMatrixInitialize(matrix);

   /* It is important to ensure that whitespace follows the index value to help
    * catch mistakes in the input file. See comments in IJVectorRead(). */
   ncols = 1;
   while ( (ret = hypre_fscanf(file, "%b %b%*[ \t]%le", &I, &J, &value)) != EOF )
   {
      if (ret != 3)
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error in IJ matrix input file.");
         /* Bug fix: close the stream before the early return; the previous
            code leaked the FILE handle on a malformed input file. */
         fclose(file);
         return hypre_error_flag;
      }
      if (I < ilower || I > iupper)
      {
         /* Off-rank row: accumulate so assembly can route it. */
         HYPRE_IJMatrixAddToValues(matrix, 1, &ncols, &I, &J, &value);
      }
      else
      {
         HYPRE_IJMatrixSetValues(matrix, 1, &ncols, &I, &J, &value);
      }
   }

   HYPRE_IJMatrixAssemble(matrix);

   fclose(file);

   *matrix_ptr = matrix;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixPrint
 *
 * Write this rank's rows to "<filename>.<rank>" in the same format that
 * HYPRE_IJMatrixRead consumes.  ParCSR only (enforced up front, so the
 * per-row GetRow/RestoreRow calls below always run).
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_IJMatrixPrint( HYPRE_IJMatrix  matrix,
                     const char     *filename )
{
   MPI_Comm        comm;
   HYPRE_BigInt   *row_partitioning;
   HYPRE_BigInt   *col_partitioning;
   HYPRE_BigInt    ilower, iupper, jlower, jupper;
   HYPRE_BigInt    i, ii;
   HYPRE_Int       j;
   HYPRE_Int       ncols;
   HYPRE_BigInt   *cols;
   HYPRE_Complex  *values;
   HYPRE_Int       myid;
   char            new_filename[255];
   FILE           *file;
   void           *object;

   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Only ParCSR matrices can be printed; this early return guarantees
      ncols/cols/values are set before use in the loop below. */
   if ( (hypre_IJMatrixObjectType(matrix) != HYPRE_PARCSR) )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   comm = hypre_IJMatrixComm(matrix);

   hypre_MPI_Comm_rank(comm, &myid);

   hypre_sprintf(new_filename,"%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "w")) == NULL)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   ilower = row_partitioning[0];
   iupper = row_partitioning[1] - 1;
   jlower = col_partitioning[0];
   jupper = col_partitioning[1] - 1;
#else
   ilower = row_partitioning[myid];
   iupper = row_partitioning[myid+1] - 1;
   jlower = col_partitioning[myid];
   jupper = col_partitioning[myid+1] - 1;
#endif

   hypre_fprintf(file, "%b %b %b %b\n", ilower, iupper, jlower, jupper);

   HYPRE_IJMatrixGetObject(matrix, &object);

   for (i = ilower; i <= iupper; i++)
   {
      if ( hypre_IJMatrixObjectType(matrix) == HYPRE_PARCSR )
      {
         /* GetRow takes a local row index and local column indices; shift
            by this rank's first row/col to convert global <-> local. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
         ii = i - hypre_IJMatrixGlobalFirstRow(matrix);
#else
         ii = i - row_partitioning[0];
#endif
         HYPRE_ParCSRMatrixGetRow((HYPRE_ParCSRMatrix) object,
                                  ii, &ncols, &cols, &values);
         for (j = 0; j < ncols; j++)
         {
#ifdef HYPRE_NO_GLOBAL_PARTITION
            cols[j] += hypre_IJMatrixGlobalFirstCol(matrix);
#else
            cols[j] += col_partitioning[0];
#endif
         }
      }

      for (j = 0; j < ncols; j++)
      {
         hypre_fprintf(file, "%b %b %.14e\n", i, cols[j], values[j]);
      }

      if ( hypre_IJMatrixObjectType(matrix) == HYPRE_PARCSR )
      {
         /* Undo the in-place global shift before returning the row to the
            matrix, so internal storage is left unmodified. */
         for (j = 0; j < ncols; j++)
         {
#ifdef HYPRE_NO_GLOBAL_PARTITION
            cols[j] -= hypre_IJMatrixGlobalFirstCol(matrix);
#else
            cols[j] -= col_partitioning[0];
#endif
         }
         HYPRE_ParCSRMatrixRestoreRow((HYPRE_ParCSRMatrix) object,
                                      ii, &ncols, &cols, &values);
      }
   }

   fclose(file);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixSetOMPFlag( HYPRE_IJMatrix matrix,
                          HYPRE_Int      omp_flag )
{
   /* Record the caller-requested OpenMP flag on the IJ matrix wrapper. */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (ijmatrix == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixOMPFlag(ijmatrix) = omp_flag;

   return hypre_error_flag;
}
perm.c
#include "ghost/context.h"
#include "ghost/locality.h"
#include "ghost/util.h"

/* One permutation entry: global row index `idx` and its permuted target
   `pidx`. */
typedef struct
{
    ghost_gidx idx, pidx;
} ghost_permutation_ent_t;

/* qsort comparator ordering entries by ascending pidx.
 *
 * Bug fix: the previous implementation returned `a->pidx - b->pidx`
 * truncated to int.  For 64-bit ghost_gidx values the difference can
 * overflow / lose its sign on truncation, producing an invalid ordering.
 * Compare explicitly instead. */
static int perm_ent_cmp(const void *a, const void *b)
{
    ghost_gidx pa = ((const ghost_permutation_ent_t *)a)->pidx;
    ghost_gidx pb = ((const ghost_permutation_ent_t *)b)->pidx;
    return (pa > pb) - (pa < pb);
}

/* Compute the global inverse permutation: given the forward permutation
 * fromPerm (fromPerm[i] = global target of local row i), fill toPerm so
 * that, across all ranks, toPerm inverts fromPerm.  Each rank sorts its
 * (idx, pidx) pairs by target and the pairs are gathered onto the rank
 * that owns the target range.
 */
ghost_error ghost_global_perm_inv(ghost_gidx *toPerm, ghost_gidx *fromPerm, ghost_context *context)
{
#ifdef GHOST_HAVE_MPI
    /* Derived type covering one (idx, pidx) pair. */
    ghost_mpi_datatype ghost_mpi_dt_perm;
    MPI_CALL_RETURN(MPI_Type_contiguous(2,ghost_mpi_dt_gidx,&ghost_mpi_dt_perm));
    MPI_CALL_RETURN(MPI_Type_commit(&ghost_mpi_dt_perm));
#endif
    ghost_lidx i;
    int proc, me, nprocs;
    ghost_rank(&me,context->mpicomm);
    ghost_nrank(&nprocs,context->mpicomm);

    /* NOTE(review): ghost_malloc return values are not checked here —
       confirm whether the surrounding codebase treats allocation failure
       as fatal. */
    ghost_permutation_ent_t *permclone;
    ghost_malloc((void **)&permclone,sizeof(ghost_permutation_ent_t)*context->row_map->ldim[me]);

#pragma omp parallel for
    for (i=0; i<context->row_map->ldim[me]; i++) {
        permclone[i].idx = context->row_map->goffs[me]+i;
        permclone[i].pidx = fromPerm[i];
    }
    qsort(permclone,context->row_map->ldim[me],sizeof(ghost_permutation_ent_t),perm_ent_cmp);

    /* permclone is now sorted by ascending pidx; because rank offsets
       (goffs) ascend with rank, `offs` can sweep forward once across all
       target ranks without resetting. */
    ghost_lidx offs = 0;

    for (proc = 0; proc<nprocs; proc++) {
        int displ[nprocs];
        int nel[nprocs];
        int recvdispl[nprocs];
        memset(displ,0,sizeof(displ));
        memset(nel,0,sizeof(nel));

        /* Find the first pidx in sorted permclone which lies on process
           proc. */
        while((offs < context->row_map->ldim[me]) && (permclone[offs].pidx < context->row_map->goffs[proc])) {
            offs++;
        }
        displ[me] = offs;

        /* Find one past the last pidx in sorted permclone which lies on
           process proc. */
        while((offs < context->row_map->ldim[me]) && (permclone[offs].pidx < context->row_map->goffs[proc]+context->row_map->ldim[proc])) {
            offs++;
        }
        nel[me] = offs-displ[me];

#ifdef GHOST_HAVE_MPI
        /* proc needs to know how many elements to receive from each
           process; all other slots of nel are zero, so MAX collects each
           sender's own count. */
        if (proc == me) {
            MPI_Reduce(MPI_IN_PLACE,nel,nprocs,MPI_INT,MPI_MAX,proc,context->mpicomm);
        } else {
            MPI_Reduce(nel,NULL,nprocs,MPI_INT,MPI_MAX,proc,context->mpicomm);
        }
#endif

        /* Assemble receive displacements (only meaningful on the root of
           the gather). */
        if (proc == me) {
            recvdispl[0] = 0;
            for (i=1; i<nprocs; i++) {
                recvdispl[i] = recvdispl[i-1] + nel[i-1];
            }
        }

        /* Prepare the receive buffer; every local row has exactly one
           global source, so ldim[me] entries suffice. */
        ghost_permutation_ent_t *recvbuf = NULL;
        if (proc == me) {
            ghost_malloc((void **)&recvbuf,context->row_map->ldim[me]*sizeof(ghost_permutation_ent_t));
        }

#ifdef GHOST_HAVE_MPI
        /* Gather the local slices of the inverse permutation onto proc. */
        MPI_Gatherv(&permclone[displ[me]],nel[me],ghost_mpi_dt_perm,recvbuf,nel,recvdispl,ghost_mpi_dt_perm,proc,context->mpicomm);
#else
        /* Single-process build: plain copy replaces the gather. */
        memcpy(recvbuf,&permclone[displ[me]],nel[me]*sizeof(ghost_permutation_ent_t));
#endif

        if (proc == me) {
            /* Sort the gathered pairs by target index and emit the source
               indices into the inverse permutation array. */
            qsort(recvbuf,context->row_map->ldim[me],sizeof(ghost_permutation_ent_t),perm_ent_cmp);
            for (i=0; i<context->row_map->ldim[me]; i++) {
                toPerm[i] = recvbuf[i].idx;
            }
        }
        if (proc == me) {
            free(recvbuf);
        }
    }

    free(permclone);

#ifdef GHOST_HAVE_MPI
    MPI_CALL_RETURN(MPI_Type_free(&ghost_mpi_dt_perm));
#endif
    return GHOST_SUCCESS;
}
solution.c
#include "simd.h"
#include <stdio.h>
#include <stdlib.h>

/*
 * N x N mean (box) filter.
 *
 * `input` is the padded source image of size (H+N-1) x (W+N-1); `output`
 * receives the H x W filtered result, where output[r][c] is the average of
 * the N x N window of input anchored at (r, c).
 *
 * Rows are processed in tiles of 64 so each OpenMP task works on a private
 * scratch buffer: a horizontal sliding-window pass produces per-row means,
 * then a vertical sliding-window pass combines them into output rows.
 */
void solve(int W, int H, int N, float *input, float *output) {
  const int TILE = 64;
#pragma omp parallel for
  for (int row0 = 0; row0 < H; row0 += TILE) {
    /* Per-tile scratch: horizontal window means for TILE+N-1 source rows. */
    float *hmean = (float *)malloc(sizeof(float) * W * (TILE + N - 1));

    /* Horizontal pass: for each needed source row, compute the running
       N-wide sum, slide it across the row, then normalize. */
    for (int r = 0; r < TILE + N - 1 && row0 + r < H + N - 1; ++r) {
      float acc = 0;
      for (int k = 0; k < N; ++k)
        acc += input[(row0 + r) * (W + N - 1) + k];
      hmean[r * W] = acc;
      for (int c = 1; c < W; ++c)
        hmean[r * W + c] = hmean[r * W + (c - 1)]
                         - input[(row0 + r) * (W + N - 1) + (c - 1)]
                         + input[(row0 + r) * (W + N - 1) + c + N - 1];
      for (int c = 0; c < W; ++c)
        hmean[r * W + c] /= N;
    }

    /* Vertical pass, first output row of the tile: direct N-row mean. */
    for (int c = 0; c < W; ++c) {
      float acc = 0;
      for (int k = 0; k < N; ++k)
        acc += hmean[k * W + c];
      output[row0 * W + c] = acc / N;
    }

    /* Remaining rows of the tile: slide the vertical window by removing
       the departing row mean and adding the incoming one. */
    for (int r = 1; r < TILE && row0 + r < H; ++r)
      for (int c = 0; c < W; ++c)
        output[(row0 + r) * W + c] =
            (output[(row0 + r - 1) * W + c] * N
             - hmean[(r - 1) * W + c]
             + hmean[(r + N - 1) * W + c]) / N;

    free(hmean);
  }
}
O7precIndxNb.c
#include <mpi.h>
#include "grid.h"

/* Descriptor for a grid variable with a 2-D or 3-D data pointer.
   NOTE(review): this file appears machine-generated; the comments below
   describe only the arithmetic that is visible here. */
extern struct {
    char *name;
    int loc;
    int dim;
    union {
        GVAL *restrict * restrict p2;
        GVAL *restrict * restrict * restrict p3;
    } data_pointer;
} *gv_grad;

/* Output field, indexed [block][height][index-in-block]. */
extern GVAL *restrict * restrict * restrict gv_precInd;
/* Indirection tables: for each (block, edge) slot, the destination block
   (t7Blk) and the destination index within that block (t7Ind). */
extern int *restrict t7Blk;
extern int *restrict t7Ind;

/* Scatter 0.5 * gv_grad into gv_precInd through the t7Blk/t7Ind
   indirection tables, over this MPI rank's share of edge blocks. */
void O7precIndxNb(GRID * g)
{
    {
        /* Blocks are distributed round-robin-free in contiguous chunks of
           ceil(eBlkCnt / world_size) per rank; min_block/max_block bound
           this rank's local chunk.  The global range starts at block 0, so
           min_block is 0 on the owning rank (and 0 elsewhere, where
           max_block is also 0). */
        size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)
            ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)
            : 0;
        /* max_block: 0 if this rank holds none of [0, eBlkCnt); on the rank
           holding the last block it is the (possibly partial) remainder of
           the chunk; on interior ranks it is the full chunk size. */
        size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)
                || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)
            ? 0
            : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)
                ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)
                    ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)
                    : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)
                : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
#pragma omp parallel for
        for (size_t block_index = (min_block); block_index < (max_block); block_index++) {
            for (size_t height_index = (0); height_index < (g->height); height_index++) {
                for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) {
                    /* Destination coordinates for this (block, edge) slot. */
                    int jb = t7Blk[block_index * g->blkSize + edge_index],
                        je = t7Ind[block_index * g->blkSize + edge_index];
                    /* NOTE(review): assumes the indirection never maps two
                       concurrently processed slots to the same (jb, je) —
                       otherwise the parallel writes would race; confirm
                       against the table generator. */
                    gv_precInd[jb][height_index][je] = 0.5 * gv_grad->data_pointer.p3[(block_index)][(height_index)][(edge_index)];
                }
            }
        }
    }
}
shallow_water.c
/** * shallow_water.c * */ #include <stdlib.h> #include <stdio.h> #include <assert.h> #include <math.h> #include <string.h> #include <unistd.h> #include <sys/time.h> #include <mkl.h> #include <vec.h> #include <composer.h> #include <omp.h> #include "shallow_water.h" #include "shallow_water_mkl.h" #include "shallow_water_composer.h" typedef enum { UNKNOWN = 0, FUSED, MKL, MKL_COMPOSER, } exec_mode_t; // Piece size for pipelined execution. long piece_size = 4096; // Number of threads. long threads = 1; // Data size as a matrix dimension. size_t data_size = 4096L; // Number of iterations to run for. int iterations = 1; // Mode to use exec_mode_t mode; exec_mode_t get_mode(char *s) { if (strcmp("fused", s) == 0) { return FUSED; } else if (strcmp("mkl", s) == 0) { return MKL; } else if (strcmp("mklcomposer", s) == 0) { return MKL_COMPOSER; } else { return UNKNOWN; } } input_t inputs(long n, int lazy) { vec_t u = vvals(n*n, 0.0, lazy); vec_t v = vvals(n*n, 0.0, lazy); vec_t eta = vvals(n*n, 1.0, 0); // Initialize eta. 
for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { eta.data[n*i + j] = 0.1 * i; } } if (lazy) { composer_tolazy(eta.data); } input_t inp; inp.n = n; inp.u = u.data; inp.v = v.data; inp.eta = eta.data; inp.g = 1.0; inp.b = 0.0; inp.grid_spacing = 1.0 / n; inp.dt = inp.grid_spacing / 100.0; return inp; } inline int posmod(int i, int n) { return (i % n + n) % n; } void roll( // Inputs MKL_INT n, const double *restrict input, int axis, int amount, // Output double *restrict output) { if (amount == 0) { memcpy(output, input, sizeof(double) * n * n); } else if (axis == 1) { #pragma omp parallel for num_threads(8) for (int i = 0; i < n; i++) { const double *restrict input_row = input + i*n; double *restrict output_row = output + i*n; if (amount > 0) { memcpy(output_row + amount, input_row, sizeof(double) * (n - amount)); memcpy(output_row, input_row + (n - amount), sizeof(double) * amount); } else { amount = abs(amount); memcpy(output_row, input_row + amount, sizeof(double) * (n - amount)); memcpy(output_row + (n - amount), input_row, sizeof(double) * amount); } } } else if (axis == 0) { if (amount > 0) { memcpy(output + n * amount, input, sizeof(double) * n * (n - amount)); memcpy(output, input + n * (n - amount), sizeof(double) * n * amount); } else { amount = abs(amount); memcpy(output, input + n * amount, sizeof(double) * n * (n - amount)); memcpy(output + n * (n - amount), input, sizeof(double) * n * amount); } } else { fprintf(stderr, "invalid axis=%d in %s()", axis, __func__); exit(1); } } void print_matrix(int n, const double *v) { printf("-------------------\n"); for (int i = 0; i < n; i++) { printf("[ "); for (int j = 0; j < n; j++) { printf("%.5f ", v[i*n + j]); } printf("]\n"); } } int power_of_two(long x) { return x && !(x & (x - 1)); } void print_usage(char **argv) { fprintf(stderr, "%s -m <mode> [-t <threads> -p <piece size> -s <matrix width/length> -h]\n", argv[0]); fprintf(stderr, "Available modes:\n"); fprintf(stderr, "\tfused\n" "\tmkl\n" 
"\tmklcomposer\n" ); } void parse_args(int argc, char **argv) { int opt; while ((opt = getopt(argc, argv, "m:t:p:s:h:i:")) != -1) { switch (opt) { case 'm': mode = get_mode(optarg); if (mode == UNKNOWN) { print_usage(argv); exit(EXIT_FAILURE); } break; case 'p': piece_size = atol(optarg); break; case 't': threads = atol(optarg); if (!power_of_two(threads) || threads > 40) { fprintf(stderr, "threads must be power-of-2 and < 16\n"); exit(EXIT_FAILURE); } break; case 'i': iterations = atol(optarg); break; case 's': data_size = atol(optarg); break; case 'h': default: print_usage(argv); exit(EXIT_FAILURE); } } } int main(int argc, char **argv) { parse_args(argc, argv); if (mode == UNKNOWN) { print_usage(argv); exit(EXIT_FAILURE); } if (iterations <= 0) { fprintf(stderr, "iterations must be greater than 0.\n"); exit(EXIT_FAILURE); } // Need to call this before any of the other library functions. if (mode == MKL_COMPOSER) { composer_init(threads, piece_size); omp_set_num_threads(threads); mkl_set_num_threads(1); } else if (mode == MKL) { omp_set_num_threads(threads); mkl_set_num_threads(threads); } else { fprintf(stderr, "Unknown mode\n"); exit(1); } printf("Data Size: %ld Iterations: %d, Piece Size: %ld Threads: %ld Mode: %d\n", data_size, iterations, piece_size, threads, mode); // Generate inputs. fprintf(stderr, "Initializing..."); fflush(stdout); int lazy = (mode == MKL_COMPOSER); // Create inputs. 
input_t inp = inputs(data_size, lazy); fprintf(stderr, "done.\n"); fflush(stdout); fprintf(stderr, "Total working set bytes: %ld\n", data_size*data_size * sizeof(double) * 10); fprintf(stderr, "--------------------\n"); struct timeval start, end, diff; gettimeofday(&start, NULL); // Run function switch (mode) { case FUSED: fprintf(stderr, "unimplemented\n"); exit(1); break; case MKL: run_mkl(iterations, inp.n, inp.eta, inp.u, inp.v, inp.g, inp.b, inp.dt, inp.grid_spacing); break; case MKL_COMPOSER: run_mkl_composer(iterations, inp.n, inp.eta, inp.u, inp.v, inp.g, inp.b, inp.dt, inp.grid_spacing); break; case UNKNOWN: default: fprintf(stderr, "unsupported case"); exit(EXIT_FAILURE); } fprintf(stderr, "Evaluating lazy calls...\n"); fflush(stderr); gettimeofday(&end, NULL); timersub(&end, &start, &diff); double runtime = (double)diff.tv_sec + ((double)diff.tv_usec / 1000000.0); // Print the results. //print_matrix(inp.n, inp.eta); printf("First number: %f\n", inp.eta[0]); fprintf(stderr, "\n"); printf("%f seconds\n", runtime); fflush(stderr); fflush(stdout); }